VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@87815

Last change on this file since 87815 was 87636, checked in by vboxsync, 4 years ago

VMM/IEM: Need to set IEM_XCPT_FLAGS_ERR for GP faults as well in IEMInjectTrap(), or the exception stack frame isn't correct, upsetting guests

1/* $Id: IEMAll.cpp 87636 2021-02-08 11:41:48Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#include "IEMInternal.h"
111#include <VBox/vmm/vmcc.h>
112#include <VBox/log.h>
113#include <VBox/err.h>
114#include <VBox/param.h>
115#include <VBox/dis.h>
116#include <VBox/disopcode.h>
117#include <iprt/asm-math.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
208
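/* Editor's note: a hedged, illustrative sketch of how the two views of IEMSELDESC
 * are usually told apart after a fetch. It is not code from this file; it assumes
 * the iemMemFetchSelDesc() prototype declared further down and the usual X86DESC
 * bitfields from iprt/x86.h.
 *
 *      IEMSELDESC  Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (Desc.Legacy.Gen.u1DescType)
 *      {
 *          // Code or data segment: the 8-byte legacy view is all there is.
 *      }
 *      else
 *      {
 *          // System descriptor: in long mode it may span 16 bytes, so use Desc.Long.
 *      }
 */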
209/**
210 * CPU exception classes.
211 */
212typedef enum IEMXCPTCLASS
213{
214 IEMXCPTCLASS_BENIGN,
215 IEMXCPTCLASS_CONTRIBUTORY,
216 IEMXCPTCLASS_PAGE_FAULT,
217 IEMXCPTCLASS_DOUBLE_FAULT
218} IEMXCPTCLASS;
219
220
221/*********************************************************************************************************************************
222* Defined Constants And Macros *
223*********************************************************************************************************************************/
224/** @def IEM_WITH_SETJMP
225 * Enables alternative status code handling using setjmps.
226 *
227 * This adds a bit of expense via the setjmp() call since it saves all the
228 * non-volatile registers. However, it eliminates return code checks and allows
229 * for more optimal return value passing (return regs instead of stack buffer).
230 */
231#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
232# define IEM_WITH_SETJMP
233#endif
234
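/* Editor's note: a minimal sketch of the setjmp-based status handling that
 * IEM_WITH_SETJMP enables. The fetch helper below is hypothetical (the real code
 * stashes a jmp_buf pointer in the IEM state); the point is only the general idea
 * of trading a status check after every call for a single setjmp frame.
 *
 *      jmp_buf JmpBuf;
 *      int     rc = setjmp(JmpBuf);
 *      if (rc == 0)
 *      {
 *          uint8_t bOpcode = myFetchU8Jmp(pVCpu);  // hypothetical: longjmps on #PF instead of returning a status
 *          // ... decode and execute without checking a status code after every fetch ...
 *      }
 *      else
 *      {
 *          // rc is the status passed to longjmp(); convert/propagate it here.
 *      }
 */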
235/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
236 * due to GCC lacking knowledge about the value range of a switch. */
237#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
238
239/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
241
242/**
243 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
244 * occasion.
245 */
246#ifdef LOG_ENABLED
247# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
248 do { \
249 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
250 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
251 } while (0)
252#else
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
255#endif
256
257/**
258 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
259 * occasion using the supplied logger statement.
260 *
261 * @param a_LoggerArgs What to log on failure.
262 */
263#ifdef LOG_ENABLED
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 do { \
266 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
267 /*LogFunc(a_LoggerArgs);*/ \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
269 } while (0)
270#else
271# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
272 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
273#endif
274
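/* Editor's note: illustrative use of the two macros above from inside an
 * instruction implementation; the condition and the format arguments are made up
 * for the example.
 *
 *      if (fSomeUnhandledCornerCase)
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("mode=%u not handled\n", pVCpu->iem.s.enmCpuMode));
 *
 * Note the double parentheses: the whole argument list is forwarded to LogAlways() as-is.
 */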
275/**
276 * Call an opcode decoder function.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF.
280 */
281#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
282
283/**
284 * Call a common opcode decoder function taking one extra argument.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_1.
288 */
289#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
290
291/**
292 * Call a common opcode decoder function taking two extra arguments.
293 *
294 * We're using macros for this so that adding and removing parameters can be
295 * done as we please. See FNIEMOP_DEF_2.
296 */
297#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
298
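/* Editor's note: a hedged example of how FNIEMOP_DEF and FNIEMOP_CALL pair up;
 * iemOp_Example is a made-up name and not part of IEM. FNIEMOP_CALL expects a
 * pVCpu variable to be in scope at the call site.
 *
 *      FNIEMOP_DEF(iemOp_Example)
 *      {
 *          // decode operands, do the work, ...
 *          return VINF_SUCCESS;
 *      }
 *
 *      // somewhere in the dispatcher, with pVCpu in scope:
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(iemOp_Example);
 */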
299/**
300 * Check if we're currently executing in real or virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The IEM state of the current CPU.
304 */
305#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in virtual 8086 mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in long mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Check if we're currently executing in a 64-bit code segment.
325 *
326 * @returns @c true if it is, @c false if not.
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
330
331/**
332 * Check if we're currently executing in real mode.
333 *
334 * @returns @c true if it is, @c false if not.
335 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
336 */
337#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
338
339/**
340 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
341 * @returns PCCPUMFEATURES
342 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
343 */
344#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
345
346/**
347 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
348 * @returns PCCPUMFEATURES
349 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
350 */
351#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
352
353/**
354 * Evaluates to true if we're presenting an Intel CPU to the guest.
355 */
356#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
357
358/**
359 * Evaluates to true if we're presenting an AMD CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
362
363/**
364 * Check if the address is canonical.
365 */
366#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
367
368/**
369 * Gets the effective VEX.VVVV value.
370 *
371 * The 4th bit is ignored if not 64-bit code.
372 * @returns effective V-register value.
373 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
374 */
375#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
376 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
377
378/** @def IEM_USE_UNALIGNED_DATA_ACCESS
379 * Use unaligned accesses instead of elaborate byte assembly. */
380#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
381# define IEM_USE_UNALIGNED_DATA_ACCESS
382#endif
383
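/* Editor's note: a sketch of what IEM_USE_UNALIGNED_DATA_ACCESS toggles when
 * reading a possibly misaligned 32-bit value through a mapped byte pointer; the
 * surrounding fetch code is illustrative, not the actual implementation.
 *
 *  #ifdef IEM_USE_UNALIGNED_DATA_ACCESS
 *      uint32_t const u32 = *(uint32_t const *)pbSrc;      // x86/AMD64 tolerate unaligned loads
 *  #else
 *      uint32_t const u32 = RT_MAKE_U32_FROM_U8(pbSrc[0], pbSrc[1], pbSrc[2], pbSrc[3]);
 *  #endif
 */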
384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
385
386/**
387 * Check if the guest has entered VMX root operation.
388 */
389# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
390
391/**
392 * Check if the guest has entered VMX non-root operation.
393 */
394# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if the nested-guest has the given Pin-based VM-execution control set.
398 */
399# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
400 (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
401
402/**
403 * Check if the nested-guest has the given Processor-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
406 (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
407
408/**
409 * Check if the nested-guest has the given Secondary Processor-based VM-execution
410 * control set.
411 */
412# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
413 (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
414
415/**
416 * Invokes the VMX VM-exit handler for an instruction intercept.
417 */
418# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
419 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept where the
423 * instruction provides additional VM-exit information.
424 */
425# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
426 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
427
428/**
429 * Invokes the VMX VM-exit handler for a task switch.
430 */
431# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
432 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for MWAIT.
436 */
437# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
438 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler.
442 */
443# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
444 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
445
446#else
447# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
448# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
449# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
450# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
451# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
452# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
453# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
454# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
455# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
457
458#endif
459
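/* Editor's note: an illustrative (not verbatim) use of the intercept macros above
 * from an instruction implementation; CPUID, for example, causes an unconditional
 * VM-exit in VMX non-root operation.
 *
 *      if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
 *          IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
 */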
460#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
461/**
462 * Check if an SVM control/instruction intercept is set.
463 */
464# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
465 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
466
467/**
468 * Check if an SVM read CRx intercept is set.
469 */
470# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
471 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
472
473/**
474 * Check if an SVM write CRx intercept is set.
475 */
476# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM read DRx intercept is set.
481 */
482# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
483 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
484
485/**
486 * Check if an SVM write DRx intercept is set.
487 */
488# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM exception intercept is set.
493 */
494# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
495 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
496
497/**
498 * Invokes the SVM \#VMEXIT handler for the nested-guest.
499 */
500# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
501 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
502
503/**
504 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
505 * corresponding decode assist information.
506 */
507# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
508 do \
509 { \
510 uint64_t uExitInfo1; \
511 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
512 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
513 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
514 else \
515 uExitInfo1 = 0; \
516 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
517 } while (0)
518
519/** Checks and handles the SVM nested-guest instruction intercept and updates
520 * the NRIP if needed.
521 */
522# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
523 do \
524 { \
525 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
526 { \
527 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
528 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
529 } \
530 } while (0)
531
532/** Checks and handles SVM nested-guest CR0 read intercept. */
533# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
534 do \
535 { \
536 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
537 { /* probably likely */ } \
538 else \
539 { \
540 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
541 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
542 } \
543 } while (0)
544
545/**
546 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
547 */
548# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
549 do { \
550 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
551 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
552 } while (0)
553
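/* Editor's note: illustrative use of IEM_SVM_CHECK_INSTR_INTERCEPT from an
 * instruction implementation, assuming the usual SVM_CTRL_INTERCEPT_RDTSC and
 * SVM_EXIT_RDTSC constants from hm_svm.h and no extra exit information.
 *
 *      IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC, 0, 0); // no exit info
 */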
554#else
555# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
556# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
557# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
558# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
559# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
560# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
561# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
562# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
563# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
564# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
565# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
566
567#endif
568
569
570/*********************************************************************************************************************************
571* Global Variables *
572*********************************************************************************************************************************/
573extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
574
575
576/** Function table for the ADD instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
578{
579 iemAImpl_add_u8, iemAImpl_add_u8_locked,
580 iemAImpl_add_u16, iemAImpl_add_u16_locked,
581 iemAImpl_add_u32, iemAImpl_add_u32_locked,
582 iemAImpl_add_u64, iemAImpl_add_u64_locked
583};
584
585/** Function table for the ADC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
587{
588 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
589 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
590 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
591 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
592};
593
594/** Function table for the SUB instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
596{
597 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
598 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
599 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
600 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
601};
602
603/** Function table for the SBB instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
605{
606 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
607 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
608 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
609 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
610};
611
612/** Function table for the OR instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
614{
615 iemAImpl_or_u8, iemAImpl_or_u8_locked,
616 iemAImpl_or_u16, iemAImpl_or_u16_locked,
617 iemAImpl_or_u32, iemAImpl_or_u32_locked,
618 iemAImpl_or_u64, iemAImpl_or_u64_locked
619};
620
621/** Function table for the XOR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
623{
624 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
625 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
626 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
627 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
628};
629
630/** Function table for the AND instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
632{
633 iemAImpl_and_u8, iemAImpl_and_u8_locked,
634 iemAImpl_and_u16, iemAImpl_and_u16_locked,
635 iemAImpl_and_u32, iemAImpl_and_u32_locked,
636 iemAImpl_and_u64, iemAImpl_and_u64_locked
637};
638
639/** Function table for the CMP instruction.
640 * @remarks Making operand order ASSUMPTIONS.
641 */
642IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
643{
644 iemAImpl_cmp_u8, NULL,
645 iemAImpl_cmp_u16, NULL,
646 iemAImpl_cmp_u32, NULL,
647 iemAImpl_cmp_u64, NULL
648};
649
650/** Function table for the TEST instruction.
651 * @remarks Making operand order ASSUMPTIONS.
652 */
653IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
654{
655 iemAImpl_test_u8, NULL,
656 iemAImpl_test_u16, NULL,
657 iemAImpl_test_u32, NULL,
658 iemAImpl_test_u64, NULL
659};
660
661/** Function table for the BT instruction. */
662IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
663{
664 NULL, NULL,
665 iemAImpl_bt_u16, NULL,
666 iemAImpl_bt_u32, NULL,
667 iemAImpl_bt_u64, NULL
668};
669
670/** Function table for the BTC instruction. */
671IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
672{
673 NULL, NULL,
674 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
675 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
676 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
677};
678
679/** Function table for the BTR instruction. */
680IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
681{
682 NULL, NULL,
683 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
684 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
685 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
686};
687
688/** Function table for the BTS instruction. */
689IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
690{
691 NULL, NULL,
692 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
693 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
694 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
695};
696
697/** Function table for the BSF instruction. */
698IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
699{
700 NULL, NULL,
701 iemAImpl_bsf_u16, NULL,
702 iemAImpl_bsf_u32, NULL,
703 iemAImpl_bsf_u64, NULL
704};
705
706/** Function table for the BSR instruction. */
707IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
708{
709 NULL, NULL,
710 iemAImpl_bsr_u16, NULL,
711 iemAImpl_bsr_u32, NULL,
712 iemAImpl_bsr_u64, NULL
713};
714
715/** Function table for the IMUL instruction. */
716IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
717{
718 NULL, NULL,
719 iemAImpl_imul_two_u16, NULL,
720 iemAImpl_imul_two_u32, NULL,
721 iemAImpl_imul_two_u64, NULL
722};
723
724/** Group 1 /r lookup table. */
725IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
726{
727 &g_iemAImpl_add,
728 &g_iemAImpl_or,
729 &g_iemAImpl_adc,
730 &g_iemAImpl_sbb,
731 &g_iemAImpl_and,
732 &g_iemAImpl_sub,
733 &g_iemAImpl_xor,
734 &g_iemAImpl_cmp
735};
736
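/* Editor's note: a hedged sketch of how the group 1 table above is indexed by the
 * ModR/M reg field (/0=ADD ... /7=CMP). The decoder code shown here is
 * illustrative and assumes the usual ModR/M helpers from iprt/x86.h.
 *
 *      uint8_t const         iReg  = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
 *      PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[iReg];
 *      // pImpl then supplies the normal and lock-prefixed worker for each operand size.
 */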
737/** Function table for the INC instruction. */
738IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
739{
740 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
741 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
742 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
743 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
744};
745
746/** Function table for the DEC instruction. */
747IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
748{
749 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
750 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
751 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
752 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
753};
754
755/** Function table for the NEG instruction. */
756IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
757{
758 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
759 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
760 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
761 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
762};
763
764/** Function table for the NOT instruction. */
765IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
766{
767 iemAImpl_not_u8, iemAImpl_not_u8_locked,
768 iemAImpl_not_u16, iemAImpl_not_u16_locked,
769 iemAImpl_not_u32, iemAImpl_not_u32_locked,
770 iemAImpl_not_u64, iemAImpl_not_u64_locked
771};
772
773
774/** Function table for the ROL instruction. */
775IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
776{
777 iemAImpl_rol_u8,
778 iemAImpl_rol_u16,
779 iemAImpl_rol_u32,
780 iemAImpl_rol_u64
781};
782
783/** Function table for the ROR instruction. */
784IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
785{
786 iemAImpl_ror_u8,
787 iemAImpl_ror_u16,
788 iemAImpl_ror_u32,
789 iemAImpl_ror_u64
790};
791
792/** Function table for the RCL instruction. */
793IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
794{
795 iemAImpl_rcl_u8,
796 iemAImpl_rcl_u16,
797 iemAImpl_rcl_u32,
798 iemAImpl_rcl_u64
799};
800
801/** Function table for the RCR instruction. */
802IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
803{
804 iemAImpl_rcr_u8,
805 iemAImpl_rcr_u16,
806 iemAImpl_rcr_u32,
807 iemAImpl_rcr_u64
808};
809
810/** Function table for the SHL instruction. */
811IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
812{
813 iemAImpl_shl_u8,
814 iemAImpl_shl_u16,
815 iemAImpl_shl_u32,
816 iemAImpl_shl_u64
817};
818
819/** Function table for the SHR instruction. */
820IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
821{
822 iemAImpl_shr_u8,
823 iemAImpl_shr_u16,
824 iemAImpl_shr_u32,
825 iemAImpl_shr_u64
826};
827
828/** Function table for the SAR instruction. */
829IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
830{
831 iemAImpl_sar_u8,
832 iemAImpl_sar_u16,
833 iemAImpl_sar_u32,
834 iemAImpl_sar_u64
835};
836
837
838/** Function table for the MUL instruction. */
839IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
840{
841 iemAImpl_mul_u8,
842 iemAImpl_mul_u16,
843 iemAImpl_mul_u32,
844 iemAImpl_mul_u64
845};
846
847/** Function table for the IMUL instruction working implicitly on rAX. */
848IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
849{
850 iemAImpl_imul_u8,
851 iemAImpl_imul_u16,
852 iemAImpl_imul_u32,
853 iemAImpl_imul_u64
854};
855
856/** Function table for the DIV instruction. */
857IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
858{
859 iemAImpl_div_u8,
860 iemAImpl_div_u16,
861 iemAImpl_div_u32,
862 iemAImpl_div_u64
863};
864
865/** Function table for the IDIV instruction. */
866IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
867{
868 iemAImpl_idiv_u8,
869 iemAImpl_idiv_u16,
870 iemAImpl_idiv_u32,
871 iemAImpl_idiv_u64
872};
873
874/** Function table for the SHLD instruction */
875IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
876{
877 iemAImpl_shld_u16,
878 iemAImpl_shld_u32,
879 iemAImpl_shld_u64,
880};
881
882/** Function table for the SHRD instruction */
883IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
884{
885 iemAImpl_shrd_u16,
886 iemAImpl_shrd_u32,
887 iemAImpl_shrd_u64,
888};
889
890
891/** Function table for the PUNPCKLBW instruction */
892IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
893/** Function table for the PUNPCKLWD instruction */
894IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
895/** Function table for the PUNPCKLDQ instruction */
896IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
897/** Function table for the PUNPCKLQDQ instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
899
900/** Function table for the PUNPCKHBW instruction */
901IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
902/** Function table for the PUNPCKHWD instruction */
903IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
904/** Function table for the PUNPCKHDQ instruction */
905IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
906/** Function table for the PUNPCKHQDQ instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
908
909/** Function table for the PXOR instruction */
910IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
911/** Function table for the PCMPEQB instruction */
912IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
913/** Function table for the PCMPEQW instruction */
914IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
915/** Function table for the PCMPEQD instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
917
918
919#if defined(IEM_LOG_MEMORY_WRITES)
920/** What IEM just wrote. */
921uint8_t g_abIemWrote[256];
922/** How much IEM just wrote. */
923size_t g_cbIemWrote;
924#endif
925
926
927/*********************************************************************************************************************************
928* Internal Functions *
929*********************************************************************************************************************************/
930IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
931IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
932IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
934/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
935IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
936IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
938IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
939IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
940IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
941IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
943IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
944IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
945IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
946IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
947#ifdef IEM_WITH_SETJMP
948DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
949DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
950DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
953#endif
954
955IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
956IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
957IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
958IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
959IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
960IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
966IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
967IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
969IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
970IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
971DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
972DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
973
974#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
975IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
976IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
978IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
979IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
980IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
981IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
982#endif
983
984#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
985IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
986IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
987#endif
988
989
990/**
991 * Sets the pass up status.
992 *
993 * @returns VINF_SUCCESS.
994 * @param pVCpu The cross context virtual CPU structure of the
995 * calling thread.
996 * @param rcPassUp The pass up status. Must be informational.
997 * VINF_SUCCESS is not allowed.
998 */
999IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
1000{
1001 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1002
1003 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1004 if (rcOldPassUp == VINF_SUCCESS)
1005 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1006 /* If both are EM scheduling codes, use EM priority rules. */
1007 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1008 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1009 {
1010 if (rcPassUp < rcOldPassUp)
1011 {
1012 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1013 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1014 }
1015 else
1016 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1017 }
1018 /* Override EM scheduling with specific status code. */
1019 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1020 {
1021 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1022 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1023 }
1024 /* Don't override specific status code, first come first served. */
1025 else
1026 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1027 return VINF_SUCCESS;
1028}
1029
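/* Editor's note: the typical call pattern, as also used further down in
 * iemInitDecoderAndPrefetchOpcodes(): remember an informational status and carry
 * on as if the access fully succeeded.
 *
 *      if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *          rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 */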
1030
1031/**
1032 * Calculates the CPU mode.
1033 *
1034 * This is mainly for updating IEMCPU::enmCpuMode.
1035 *
1036 * @returns CPU mode.
1037 * @param pVCpu The cross context virtual CPU structure of the
1038 * calling thread.
1039 */
1040DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
1041{
1042 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1043 return IEMMODE_64BIT;
1044 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1045 return IEMMODE_32BIT;
1046 return IEMMODE_16BIT;
1047}
1048
1049
1050/**
1051 * Initializes the execution state.
1052 *
1053 * @param pVCpu The cross context virtual CPU structure of the
1054 * calling thread.
1055 * @param fBypassHandlers Whether to bypass access handlers.
1056 *
1057 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1058 * side-effects in strict builds.
1059 */
1060DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
1061{
1062 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1063 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1064 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1067 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1068 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1072
1073 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1074 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1075#ifdef VBOX_STRICT
1076 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1077 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1078 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1079 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1080 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1081 pVCpu->iem.s.uRexReg = 127;
1082 pVCpu->iem.s.uRexB = 127;
1083 pVCpu->iem.s.offModRm = 127;
1084 pVCpu->iem.s.uRexIndex = 127;
1085 pVCpu->iem.s.iEffSeg = 127;
1086 pVCpu->iem.s.idxPrefix = 127;
1087 pVCpu->iem.s.uVex3rdReg = 127;
1088 pVCpu->iem.s.uVexLength = 127;
1089 pVCpu->iem.s.fEvexStuff = 127;
1090 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1091# ifdef IEM_WITH_CODE_TLB
1092 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1093 pVCpu->iem.s.pbInstrBuf = NULL;
1094 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1095 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1096 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1097 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1098# else
1099 pVCpu->iem.s.offOpcode = 127;
1100 pVCpu->iem.s.cbOpcode = 127;
1101# endif
1102#endif
1103
1104 pVCpu->iem.s.cActiveMappings = 0;
1105 pVCpu->iem.s.iNextMapping = 0;
1106 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1107 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1108#if 0
1109#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1110 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1111 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1112 {
1113 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1114 Assert(pVmcs);
1115 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1116 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1117 {
1118 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1119 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1120 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1121 AssertRC(rc);
1122 }
1123 }
1124#endif
1125#endif
1126}
1127
1128#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1129/**
1130 * Performs a minimal reinitialization of the execution state.
1131 *
1132 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1133 * 'world-switch' type operations on the CPU. Currently, only nested
1134 * hardware-virtualization uses it.
1135 *
1136 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1137 */
1138IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
1139{
1140 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1141 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1142
1143 pVCpu->iem.s.uCpl = uCpl;
1144 pVCpu->iem.s.enmCpuMode = enmMode;
1145 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1146 pVCpu->iem.s.enmEffAddrMode = enmMode;
1147 if (enmMode != IEMMODE_64BIT)
1148 {
1149 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1150 pVCpu->iem.s.enmEffOpSize = enmMode;
1151 }
1152 else
1153 {
1154 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1155 pVCpu->iem.s.enmEffOpSize = enmMode;
1156 }
1157 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1158#ifndef IEM_WITH_CODE_TLB
1159 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1160 pVCpu->iem.s.offOpcode = 0;
1161 pVCpu->iem.s.cbOpcode = 0;
1162#endif
1163 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1164}
1165#endif
1166
1167/**
1168 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1169 *
1170 * @param pVCpu The cross context virtual CPU structure of the
1171 * calling thread.
1172 */
1173DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
1174{
1175 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1176#ifdef VBOX_STRICT
1177# ifdef IEM_WITH_CODE_TLB
1178 NOREF(pVCpu);
1179# else
1180 pVCpu->iem.s.cbOpcode = 0;
1181# endif
1182#else
1183 NOREF(pVCpu);
1184#endif
1185}
1186
1187
1188/**
1189 * Initializes the decoder state.
1190 *
1191 * iemReInitDecoder is mostly a copy of this function.
1192 *
1193 * @param pVCpu The cross context virtual CPU structure of the
1194 * calling thread.
1195 * @param fBypassHandlers Whether to bypass access handlers.
1196 */
1197DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers)
1198{
1199 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1200 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1209
1210 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1211 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1212 pVCpu->iem.s.enmCpuMode = enmMode;
1213 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1214 pVCpu->iem.s.enmEffAddrMode = enmMode;
1215 if (enmMode != IEMMODE_64BIT)
1216 {
1217 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1218 pVCpu->iem.s.enmEffOpSize = enmMode;
1219 }
1220 else
1221 {
1222 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1223 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1224 }
1225 pVCpu->iem.s.fPrefixes = 0;
1226 pVCpu->iem.s.uRexReg = 0;
1227 pVCpu->iem.s.uRexB = 0;
1228 pVCpu->iem.s.uRexIndex = 0;
1229 pVCpu->iem.s.idxPrefix = 0;
1230 pVCpu->iem.s.uVex3rdReg = 0;
1231 pVCpu->iem.s.uVexLength = 0;
1232 pVCpu->iem.s.fEvexStuff = 0;
1233 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1234#ifdef IEM_WITH_CODE_TLB
1235 pVCpu->iem.s.pbInstrBuf = NULL;
1236 pVCpu->iem.s.offInstrNextByte = 0;
1237 pVCpu->iem.s.offCurInstrStart = 0;
1238# ifdef VBOX_STRICT
1239 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1240 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1241 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1242# endif
1243#else
1244 pVCpu->iem.s.offOpcode = 0;
1245 pVCpu->iem.s.cbOpcode = 0;
1246#endif
1247 pVCpu->iem.s.offModRm = 0;
1248 pVCpu->iem.s.cActiveMappings = 0;
1249 pVCpu->iem.s.iNextMapping = 0;
1250 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1251 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1252
1253#ifdef DBGFTRACE_ENABLED
1254 switch (enmMode)
1255 {
1256 case IEMMODE_64BIT:
1257 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1258 break;
1259 case IEMMODE_32BIT:
1260 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1261 break;
1262 case IEMMODE_16BIT:
1263 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1264 break;
1265 }
1266#endif
1267}
1268
1269
1270/**
1271 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1272 *
1273 * This is mostly a copy of iemInitDecoder.
1274 *
1275 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1276 */
1277DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
1278{
1279 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1280 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1281 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1282 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1283 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1284 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1285 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1286 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1287 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1288
1289 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1290 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1291 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1292 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1293 pVCpu->iem.s.enmEffAddrMode = enmMode;
1294 if (enmMode != IEMMODE_64BIT)
1295 {
1296 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1297 pVCpu->iem.s.enmEffOpSize = enmMode;
1298 }
1299 else
1300 {
1301 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1302 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1303 }
1304 pVCpu->iem.s.fPrefixes = 0;
1305 pVCpu->iem.s.uRexReg = 0;
1306 pVCpu->iem.s.uRexB = 0;
1307 pVCpu->iem.s.uRexIndex = 0;
1308 pVCpu->iem.s.idxPrefix = 0;
1309 pVCpu->iem.s.uVex3rdReg = 0;
1310 pVCpu->iem.s.uVexLength = 0;
1311 pVCpu->iem.s.fEvexStuff = 0;
1312 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1313#ifdef IEM_WITH_CODE_TLB
1314 if (pVCpu->iem.s.pbInstrBuf)
1315 {
1316 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1317 - pVCpu->iem.s.uInstrBufPc;
1318 if (off < pVCpu->iem.s.cbInstrBufTotal)
1319 {
1320 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1321 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1322 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1323 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1324 else
1325 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1326 }
1327 else
1328 {
1329 pVCpu->iem.s.pbInstrBuf = NULL;
1330 pVCpu->iem.s.offInstrNextByte = 0;
1331 pVCpu->iem.s.offCurInstrStart = 0;
1332 pVCpu->iem.s.cbInstrBuf = 0;
1333 pVCpu->iem.s.cbInstrBufTotal = 0;
1334 }
1335 }
1336 else
1337 {
1338 pVCpu->iem.s.offInstrNextByte = 0;
1339 pVCpu->iem.s.offCurInstrStart = 0;
1340 pVCpu->iem.s.cbInstrBuf = 0;
1341 pVCpu->iem.s.cbInstrBufTotal = 0;
1342 }
1343#else
1344 pVCpu->iem.s.cbOpcode = 0;
1345 pVCpu->iem.s.offOpcode = 0;
1346#endif
1347 pVCpu->iem.s.offModRm = 0;
1348 Assert(pVCpu->iem.s.cActiveMappings == 0);
1349 pVCpu->iem.s.iNextMapping = 0;
1350 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1351 Assert(pVCpu->iem.s.fBypassHandlers == false);
1352
1353#ifdef DBGFTRACE_ENABLED
1354 switch (enmMode)
1355 {
1356 case IEMMODE_64BIT:
1357 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1358 break;
1359 case IEMMODE_32BIT:
1360 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1361 break;
1362 case IEMMODE_16BIT:
1363 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1364 break;
1365 }
1366#endif
1367}
1368
1369
1370
1371/**
1372 * Prefetches opcodes when starting execution for the first time.
1373 *
1374 * @returns Strict VBox status code.
1375 * @param pVCpu The cross context virtual CPU structure of the
1376 * calling thread.
1377 * @param fBypassHandlers Whether to bypass access handlers.
1378 */
1379IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers)
1380{
1381 iemInitDecoder(pVCpu, fBypassHandlers);
1382
1383#ifdef IEM_WITH_CODE_TLB
1384 /** @todo Do ITLB lookup here. */
1385
1386#else /* !IEM_WITH_CODE_TLB */
1387
1388 /*
1389 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1390 *
1391 * First translate CS:rIP to a physical address.
1392 */
1393 uint32_t cbToTryRead;
1394 RTGCPTR GCPtrPC;
1395 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1396 {
1397 cbToTryRead = PAGE_SIZE;
1398 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1399 if (IEM_IS_CANONICAL(GCPtrPC))
1400 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1401 else
1402 return iemRaiseGeneralProtectionFault0(pVCpu);
1403 }
1404 else
1405 {
1406 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1407 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1408 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1409 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1410 else
1411 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1412 if (cbToTryRead) { /* likely */ }
1413 else /* overflowed */
1414 {
1415 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1416 cbToTryRead = UINT32_MAX;
1417 }
1418 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1419 Assert(GCPtrPC <= UINT32_MAX);
1420 }
1421
1422 RTGCPHYS GCPhys;
1423 uint64_t fFlags;
1424 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1425 if (RT_SUCCESS(rc)) { /* probable */ }
1426 else
1427 {
1428 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1429 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1430 }
1431 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1432 else
1433 {
1434 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1435 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1436 }
1437 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1438 else
1439 {
1440 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1441 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1442 }
1443 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1444 /** @todo Check reserved bits and such stuff. PGM is better at doing
1445 * that, so do it when implementing the guest virtual address
1446 * TLB... */
1447
1448 /*
1449 * Read the bytes at this address.
1450 */
1451 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1452 if (cbToTryRead > cbLeftOnPage)
1453 cbToTryRead = cbLeftOnPage;
1454 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1455 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1456
1457 if (!pVCpu->iem.s.fBypassHandlers)
1458 {
1459 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1460 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1461 { /* likely */ }
1462 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1463 {
1464 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1465 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1466 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1467 }
1468 else
1469 {
1470 Log((RT_SUCCESS(rcStrict)
1471 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1472 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1473 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1474 return rcStrict;
1475 }
1476 }
1477 else
1478 {
1479 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1480 if (RT_SUCCESS(rc))
1481 { /* likely */ }
1482 else
1483 {
1484 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1485 GCPtrPC, GCPhys, cbToTryRead, rc));
1486 return rc;
1487 }
1488 }
1489 pVCpu->iem.s.cbOpcode = cbToTryRead;
1490#endif /* !IEM_WITH_CODE_TLB */
1491 return VINF_SUCCESS;
1492}
1493
1494
1495/**
1496 * Invalidates the IEM TLBs.
1497 *
1498 * This is called internally as well as by PGM when moving GC mappings.
1499 *
1501 * @param pVCpu The cross context virtual CPU structure of the calling
1502 * thread.
1503 * @param fVmm Set when PGM calls us with a remapping.
1504 */
1505VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
1506{
1507#ifdef IEM_WITH_CODE_TLB
1508 pVCpu->iem.s.cbInstrBufTotal = 0;
1509 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1510 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1511 { /* very likely */ }
1512 else
1513 {
1514 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1515 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1516 while (i-- > 0)
1517 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1518 }
1519#endif
1520
1521#ifdef IEM_WITH_DATA_TLB
1522 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1523 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1524 { /* very likely */ }
1525 else
1526 {
1527 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1528 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1529 while (i-- > 0)
1530 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1531 }
1532#endif
1533 NOREF(pVCpu); NOREF(fVmm);
1534}
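/*
 * Illustrative note on the revision trick above (a sketch, mirroring the
 * lookup done in iemOpcodeFetchBytesJmp below): each TLB entry tag is stored
 * roughly as
 *
 *      uTag = (GCPtr >> X86_PAGE_SHIFT) | uTlbRevision;
 *
 * so bumping uTlbRevision by IEMTLB_REVISION_INCR makes every previously
 * stored tag mismatch at once and the flush normally never has to touch the
 * entry arrays.  Only on the rare wrap-around to zero do the entries need to
 * be cleared explicitly, which is what the unlikely branches above do.
 */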
1535
1536
1537/**
1538 * Invalidates a page in the TLBs.
1539 *
1540 * @param pVCpu The cross context virtual CPU structure of the calling
1541 * thread.
1542 * @param GCPtr The address of the page to invalidate.
1543 */
1544VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1545{
1546#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1547 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1548 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1549 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1550 uintptr_t idx = (uint8_t)GCPtr;
1551
1552# ifdef IEM_WITH_CODE_TLB
1553 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1554 {
1555 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1556 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1557 pVCpu->iem.s.cbInstrBufTotal = 0;
1558 }
1559# endif
1560
1561# ifdef IEM_WITH_DATA_TLB
1562 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1563 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1564# endif
1565#else
1566 NOREF(pVCpu); NOREF(GCPtr);
1567#endif
1568}
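/*
 * Worked example for the indexing above (illustrative address): for
 * GCPtr=0x00007fff12345678 the page number is GCPtr >> X86_PAGE_SHIFT =
 * 0x7fff12345, the entry index is the low byte of that (0x45), and the entry
 * is flushed only if its stored tag equals the page number OR'ed with the
 * current TLB revision; entries inserted under an older revision are already
 * stale and need no clearing here.
 */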
1569
1570
1571/**
1572 * Invalidates the host physical aspects of the IEM TLBs.
1573 *
1574 * This is called internally as well as by PGM when moving GC mappings.
1575 *
1576 * @param pVCpu The cross context virtual CPU structure of the calling
1577 * thread.
1578 */
1579VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1580{
1581#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1582 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1583
1584# ifdef IEM_WITH_CODE_TLB
1585 pVCpu->iem.s.cbInstrBufTotal = 0;
1586# endif
1587 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1588 if (uTlbPhysRev != 0)
1589 {
1590 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1591 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1592 }
1593 else
1594 {
1595 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1596 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1597
1598 unsigned i;
1599# ifdef IEM_WITH_CODE_TLB
1600 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1601 while (i-- > 0)
1602 {
1603 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1604 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1605 }
1606# endif
1607# ifdef IEM_WITH_DATA_TLB
1608 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1609 while (i-- > 0)
1610 {
1611 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1612 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1613 }
1614# endif
1615 }
1616#else
1617 NOREF(pVCpu);
1618#endif
1619}
1620
1621
1622/**
1623 * Invalidates the host physical aspects of the IEM TLBs on all virtual CPUs.
1624 *
1625 * This is called internally as well as by PGM when moving GC mappings.
1626 *
1627 * @param pVM The cross context VM structure.
1628 *
1629 * @remarks Caller holds the PGM lock.
1630 */
1631VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1632{
1633 RT_NOREF_PV(pVM);
1634}
1635
1636#ifdef IEM_WITH_CODE_TLB
1637
1638/**
1639 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1640 * longjmp'ing on failure.
1641 *
1642 * We end up here for a number of reasons:
1643 * - pbInstrBuf isn't yet initialized.
1644 * - Advancing beyond the buffer boundary (e.g. cross page).
1645 * - Advancing beyond the CS segment limit.
1646 * - Fetching from non-mappable page (e.g. MMIO).
1647 *
1648 * @param pVCpu The cross context virtual CPU structure of the
1649 * calling thread.
1650 * @param pvDst Where to return the bytes.
1651 * @param cbDst Number of bytes to read.
1652 *
1653 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1654 */
1655IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
1656{
1657#ifdef IN_RING3
1658 for (;;)
1659 {
1660 Assert(cbDst <= 8);
1661 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1662
1663 /*
1664 * We might have a partial buffer match, deal with that first to make the
1665 * rest simpler. This is the first part of the cross page/buffer case.
1666 */
1667 if (pVCpu->iem.s.pbInstrBuf != NULL)
1668 {
1669 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1670 {
1671 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1672 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1673 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1674
1675 cbDst -= cbCopy;
1676 pvDst = (uint8_t *)pvDst + cbCopy;
1677 offBuf += cbCopy;
1678 pVCpu->iem.s.offInstrNextByte += offBuf;
1679 }
1680 }
1681
1682 /*
1683 * Check segment limit, figuring how much we're allowed to access at this point.
1684 *
1685 * We will fault immediately if RIP is past the segment limit / in non-canonical
1686 * territory. If we do continue, there are one or more bytes to read before we
1687 * end up in trouble and we need to do that first before faulting.
1688 */
1689 RTGCPTR GCPtrFirst;
1690 uint32_t cbMaxRead;
1691 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1692 {
1693 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1694 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1695 { /* likely */ }
1696 else
1697 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1698 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1699 }
1700 else
1701 {
1702 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1703 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1704 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1705 { /* likely */ }
1706 else
1707 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1708 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1709 if (cbMaxRead != 0)
1710 { /* likely */ }
1711 else
1712 {
1713 /* Overflowed because address is 0 and limit is max. */
1714 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1715 cbMaxRead = X86_PAGE_SIZE;
1716 }
1717 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1718 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1719 if (cbMaxRead2 < cbMaxRead)
1720 cbMaxRead = cbMaxRead2;
1721 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1722 }
1723
1724 /*
1725 * Get the TLB entry for this piece of code.
1726 */
1727 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1728 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1729 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1730 if (pTlbe->uTag == uTag)
1731 {
1732 /* likely when executing lots of code, otherwise unlikely */
1733# ifdef VBOX_WITH_STATISTICS
1734 pVCpu->iem.s.CodeTlb.cTlbHits++;
1735# endif
1736 }
1737 else
1738 {
1739 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1740 RTGCPHYS GCPhys;
1741 uint64_t fFlags;
1742 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1743 if (RT_FAILURE(rc))
1744 {
1745 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1746 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1747 }
1748
1749 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1750 pTlbe->uTag = uTag;
1751 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1752 pTlbe->GCPhys = GCPhys;
1753 pTlbe->pbMappingR3 = NULL;
1754 }
1755
1756 /*
1757 * Check TLB page table level access flags.
1758 */
1759 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1760 {
1761 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1762 {
1763 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1764 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1765 }
1766 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1767 {
1768 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1769 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1770 }
1771 }
1772
1773 /*
1774 * Look up the physical page info if necessary.
1775 */
1776 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1777 { /* not necessary */ }
1778 else
1779 {
1780 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1781 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1782 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1783 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1784 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1785 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1786 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1787 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1788 }
1789
1790# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1791 /*
1792 * Try to do a direct read using the pbMappingR3 pointer.
1793 */
1794 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1795 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1796 {
1797 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1798 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1799 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1800 {
1801 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1802 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1803 }
1804 else
1805 {
1806 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1807 Assert(cbInstr < cbMaxRead);
1808 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1809 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1810 }
1811 if (cbDst <= cbMaxRead)
1812 {
1813 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1814 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1815 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1816 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1817 return;
1818 }
1819 pVCpu->iem.s.pbInstrBuf = NULL;
1820
1821 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1822 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1823 }
1824 else
1825# endif
1826#if 0
1827 /*
1828 * If there is no special read handling, we can read a bit more and
1829 * put it in the prefetch buffer.
1830 */
1831 if ( cbDst < cbMaxRead
1832 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1833 {
1834 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1835 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1836 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1837 { /* likely */ }
1838 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1839 {
1840 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1841 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1842 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1843 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1844 }
1845 else
1846 {
1847 Log((RT_SUCCESS(rcStrict)
1848 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1849 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1850 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1851 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1852 }
1853 }
1854 /*
1855 * Special read handling, so only read exactly what's needed.
1856 * This is a highly unlikely scenario.
1857 */
1858 else
1859#endif
1860 {
1861 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1862 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1863 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1864 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1865 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1866 { /* likely */ }
1867 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1868 {
1869 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1870 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1871 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1872 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1873 }
1874 else
1875 {
1876 Log((RT_SUCCESS(rcStrict)
1877 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1878 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1879 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1880 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1881 }
1882 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1883 if (cbToRead == cbDst)
1884 return;
1885 }
1886
1887 /*
1888 * More to read, loop.
1889 */
1890 cbDst -= cbMaxRead;
1891 pvDst = (uint8_t *)pvDst + cbMaxRead;
1892 }
1893#else
1894 RT_NOREF(pvDst, cbDst);
1895 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1896#endif
1897}
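/*
 * Rough illustration of the bookkeeping above (made-up numbers): say the
 * decoder asks for a 4 byte immediate but only 3 bytes remain before the end
 * of the current page.  The partial-match code at the top of the loop copies
 * those 3 bytes from pbInstrBuf, the code TLB is then consulted for the
 * following page and the 4th byte is read from there.  Afterwards pbInstrBuf
 * describes the new page, offInstrNextByte points just past the bytes
 * consumed, and offCurInstrStart has gone negative so the bytes already
 * fetched for the current instruction still count against the 15 byte
 * instruction length limit.
 */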
1898
1899#else
1900
1901/**
1902 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1903 * exception if it fails.
1904 *
1905 * @returns Strict VBox status code.
1906 * @param pVCpu The cross context virtual CPU structure of the
1907 * calling thread.
1908 * @param cbMin The minimum number of bytes relative to offOpcode
1909 * that must be read.
1910 */
1911IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
1912{
1913 /*
1914 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1915 *
1916 * First translate CS:rIP to a physical address.
1917 */
1918 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1919 uint32_t cbToTryRead;
1920 RTGCPTR GCPtrNext;
1921 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1922 {
1923 cbToTryRead = PAGE_SIZE;
1924 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1925 if (!IEM_IS_CANONICAL(GCPtrNext))
1926 return iemRaiseGeneralProtectionFault0(pVCpu);
1927 }
1928 else
1929 {
1930 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1931 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1932 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1933 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1934 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1935 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1936 if (!cbToTryRead) /* overflowed */
1937 {
1938 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1939 cbToTryRead = UINT32_MAX;
1940 /** @todo check out wrapping around the code segment. */
1941 }
1942 if (cbToTryRead < cbMin - cbLeft)
1943 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1944 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1945 }
1946
1947 /* Only read up to the end of the page, and make sure we don't read more
1948 than the opcode buffer can hold. */
1949 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1950 if (cbToTryRead > cbLeftOnPage)
1951 cbToTryRead = cbLeftOnPage;
1952 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1953 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1954/** @todo r=bird: Convert assertion into undefined opcode exception? */
1955 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1956
1957 RTGCPHYS GCPhys;
1958 uint64_t fFlags;
1959 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1960 if (RT_FAILURE(rc))
1961 {
1962 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1963 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1964 }
1965 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1966 {
1967 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1968 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1969 }
1970 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1971 {
1972 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1973 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1974 }
1975 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1976 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1977 /** @todo Check reserved bits and such stuff. PGM is better at doing
1978 * that, so do it when implementing the guest virtual address
1979 * TLB... */
1980
1981 /*
1982 * Read the bytes at this address.
1983 *
1984 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1985 * and since PATM should only patch the start of an instruction there
1986 * should be no need to check again here.
1987 */
1988 if (!pVCpu->iem.s.fBypassHandlers)
1989 {
1990 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1991 cbToTryRead, PGMACCESSORIGIN_IEM);
1992 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1993 { /* likely */ }
1994 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1995 {
1996 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1997 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1998 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1999 }
2000 else
2001 {
2002 Log((RT_SUCCESS(rcStrict)
2003 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2004 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2005 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2006 return rcStrict;
2007 }
2008 }
2009 else
2010 {
2011 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2012 if (RT_SUCCESS(rc))
2013 { /* likely */ }
2014 else
2015 {
2016 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2017 return rc;
2018 }
2019 }
2020 pVCpu->iem.s.cbOpcode += cbToTryRead;
2021 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2022
2023 return VINF_SUCCESS;
2024}
2025
2026#endif /* !IEM_WITH_CODE_TLB */
2027#ifndef IEM_WITH_SETJMP
2028
2029/**
2030 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2031 *
2032 * @returns Strict VBox status code.
2033 * @param pVCpu The cross context virtual CPU structure of the
2034 * calling thread.
2035 * @param pb Where to return the opcode byte.
2036 */
2037DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2038{
2039 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2040 if (rcStrict == VINF_SUCCESS)
2041 {
2042 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2043 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2044 pVCpu->iem.s.offOpcode = offOpcode + 1;
2045 }
2046 else
2047 *pb = 0;
2048 return rcStrict;
2049}
2050
2051
2052/**
2053 * Fetches the next opcode byte.
2054 *
2055 * @returns Strict VBox status code.
2056 * @param pVCpu The cross context virtual CPU structure of the
2057 * calling thread.
2058 * @param pu8 Where to return the opcode byte.
2059 */
2060DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2061{
2062 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2063 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2064 {
2065 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2066 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2067 return VINF_SUCCESS;
2068 }
2069 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2070}
2071
2072#else /* IEM_WITH_SETJMP */
2073
2074/**
2075 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2076 *
2077 * @returns The opcode byte.
2078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2079 */
2080DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2081{
2082# ifdef IEM_WITH_CODE_TLB
2083 uint8_t u8;
2084 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2085 return u8;
2086# else
2087 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2088 if (rcStrict == VINF_SUCCESS)
2089 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2090 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2091# endif
2092}
2093
2094
2095/**
2096 * Fetches the next opcode byte, longjmp on error.
2097 *
2098 * @returns The opcode byte.
2099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2100 */
2101DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2102{
2103# ifdef IEM_WITH_CODE_TLB
2104 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2105 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2106 if (RT_LIKELY( pbBuf != NULL
2107 && offBuf < pVCpu->iem.s.cbInstrBuf))
2108 {
2109 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2110 return pbBuf[offBuf];
2111 }
2112# else
2113 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2114 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2115 {
2116 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2117 return pVCpu->iem.s.abOpcode[offOpcode];
2118 }
2119# endif
2120 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2121}
2122
2123#endif /* IEM_WITH_SETJMP */
2124
2125/**
2126 * Fetches the next opcode byte, returns automatically on failure.
2127 *
2128 * @param a_pu8 Where to return the opcode byte.
2129 * @remark Implicitly references pVCpu.
2130 */
2131#ifndef IEM_WITH_SETJMP
2132# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2133 do \
2134 { \
2135 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2136 if (rcStrict2 == VINF_SUCCESS) \
2137 { /* likely */ } \
2138 else \
2139 return rcStrict2; \
2140 } while (0)
2141#else
2142# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2143#endif /* IEM_WITH_SETJMP */
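/*
 * Usage sketch (illustrative only; iemOp_Example is a made-up stub and not how
 * the real decoder functions are declared): decoders consume the opcode stream
 * through these wrappers so the same source works for both the status-code
 * build and the setjmp build:
 *
 *      IEM_STATIC VBOXSTRICTRC iemOp_Example(PVMCPUCC pVCpu)
 *      {
 *          uint8_t bImm;
 *          IEM_OPCODE_GET_NEXT_U8(&bImm);   <-- returns or longjmps on a failed fetch
 *          return VINF_SUCCESS;
 *      }
 */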
2144
2145
2146#ifndef IEM_WITH_SETJMP
2147/**
2148 * Fetches the next signed byte from the opcode stream.
2149 *
2150 * @returns Strict VBox status code.
2151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2152 * @param pi8 Where to return the signed byte.
2153 */
2154DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2155{
2156 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2157}
2158#endif /* !IEM_WITH_SETJMP */
2159
2160
2161/**
2162 * Fetches the next signed byte from the opcode stream, returning automatically
2163 * on failure.
2164 *
2165 * @param a_pi8 Where to return the signed byte.
2166 * @remark Implicitly references pVCpu.
2167 */
2168#ifndef IEM_WITH_SETJMP
2169# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2170 do \
2171 { \
2172 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2173 if (rcStrict2 != VINF_SUCCESS) \
2174 return rcStrict2; \
2175 } while (0)
2176#else /* IEM_WITH_SETJMP */
2177# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2178
2179#endif /* IEM_WITH_SETJMP */
2180
2181#ifndef IEM_WITH_SETJMP
2182
2183/**
2184 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2185 *
2186 * @returns Strict VBox status code.
2187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2188 * @param pu16 Where to return the opcode word.
2189 */
2190DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2191{
2192 uint8_t u8;
2193 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2194 if (rcStrict == VINF_SUCCESS)
2195 *pu16 = (int8_t)u8;
2196 return rcStrict;
2197}
2198
2199
2200/**
2201 * Fetches the next signed byte from the opcode stream, extending it to
2202 * unsigned 16-bit.
2203 *
2204 * @returns Strict VBox status code.
2205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2206 * @param pu16 Where to return the unsigned word.
2207 */
2208DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2209{
2210 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2211 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2212 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2213
2214 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2215 pVCpu->iem.s.offOpcode = offOpcode + 1;
2216 return VINF_SUCCESS;
2217}
2218
2219#endif /* !IEM_WITH_SETJMP */
2220
2221/**
2222 * Fetches the next signed byte from the opcode stream, sign-extending it to
2223 * a word and returning automatically on failure.
2224 *
2225 * @param a_pu16 Where to return the word.
2226 * @remark Implicitly references pVCpu.
2227 */
2228#ifndef IEM_WITH_SETJMP
2229# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2230 do \
2231 { \
2232 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2233 if (rcStrict2 != VINF_SUCCESS) \
2234 return rcStrict2; \
2235 } while (0)
2236#else
2237# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2238#endif
2239
2240#ifndef IEM_WITH_SETJMP
2241
2242/**
2243 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2244 *
2245 * @returns Strict VBox status code.
2246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2247 * @param pu32 Where to return the opcode dword.
2248 */
2249DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2250{
2251 uint8_t u8;
2252 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2253 if (rcStrict == VINF_SUCCESS)
2254 *pu32 = (int8_t)u8;
2255 return rcStrict;
2256}
2257
2258
2259/**
2260 * Fetches the next signed byte from the opcode stream, extending it to
2261 * unsigned 32-bit.
2262 *
2263 * @returns Strict VBox status code.
2264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2265 * @param pu32 Where to return the unsigned dword.
2266 */
2267DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2268{
2269 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2270 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2271 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2272
2273 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2274 pVCpu->iem.s.offOpcode = offOpcode + 1;
2275 return VINF_SUCCESS;
2276}
2277
2278#endif /* !IEM_WITH_SETJMP */
2279
2280/**
2281 * Fetches the next signed byte from the opcode stream, sign-extending it to
2282 * a double word and returning automatically on failure.
2283 *
2284 * @param a_pu32 Where to return the double word.
2285 * @remark Implicitly references pVCpu.
2286 */
2287#ifndef IEM_WITH_SETJMP
2288#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2289 do \
2290 { \
2291 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2292 if (rcStrict2 != VINF_SUCCESS) \
2293 return rcStrict2; \
2294 } while (0)
2295#else
2296# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2297#endif
2298
2299#ifndef IEM_WITH_SETJMP
2300
2301/**
2302 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2303 *
2304 * @returns Strict VBox status code.
2305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2306 * @param pu64 Where to return the opcode qword.
2307 */
2308DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2309{
2310 uint8_t u8;
2311 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2312 if (rcStrict == VINF_SUCCESS)
2313 *pu64 = (int8_t)u8;
2314 return rcStrict;
2315}
2316
2317
2318/**
2319 * Fetches the next signed byte from the opcode stream, extending it to
2320 * unsigned 64-bit.
2321 *
2322 * @returns Strict VBox status code.
2323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2324 * @param pu64 Where to return the unsigned qword.
2325 */
2326DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2327{
2328 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2329 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2330 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2331
2332 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2333 pVCpu->iem.s.offOpcode = offOpcode + 1;
2334 return VINF_SUCCESS;
2335}
2336
2337#endif /* !IEM_WITH_SETJMP */
2338
2339
2340/**
2341 * Fetches the next signed byte from the opcode stream, sign-extending it to
2342 * a quad word and returning automatically on failure.
2343 *
2344 * @param a_pu64 Where to return the quad word.
2345 * @remark Implicitly references pVCpu.
2346 */
2347#ifndef IEM_WITH_SETJMP
2348# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2349 do \
2350 { \
2351 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2352 if (rcStrict2 != VINF_SUCCESS) \
2353 return rcStrict2; \
2354 } while (0)
2355#else
2356# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2357#endif
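/*
 * Worked example for the S8_SX_* fetchers above (illustrative): a displacement
 * byte of 0xf0 is the signed value -16, so it becomes 0xfff0 for
 * IEM_OPCODE_GET_NEXT_S8_SX_U16, 0xfffffff0 for the U32 variant and
 * 0xfffffffffffffff0 for the U64 variant, which is what effective address
 * calculations with 8-bit displacements expect.
 */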
2358
2359
2360#ifndef IEM_WITH_SETJMP
2361/**
2362 * Fetches the next opcode byte, which is a ModR/M byte, and notes down its position.
2363 *
2364 * @returns Strict VBox status code.
2365 * @param pVCpu The cross context virtual CPU structure of the
2366 * calling thread.
2367 * @param pu8 Where to return the opcode byte.
2368 */
2369DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2370{
2371 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2372 pVCpu->iem.s.offModRm = offOpcode;
2373 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2374 {
2375 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2376 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2377 return VINF_SUCCESS;
2378 }
2379 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2380}
2381#else /* IEM_WITH_SETJMP */
2382/**
2383 * Fetches the next opcode byte, which is a ModR/M byte, and notes down its position; longjmp on error.
2384 *
2385 * @returns The opcode byte.
2386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2387 */
2388DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2389{
2390# ifdef IEM_WITH_CODE_TLB
2391 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2392 pVCpu->iem.s.offModRm = offBuf;
2393 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2394 if (RT_LIKELY( pbBuf != NULL
2395 && offBuf < pVCpu->iem.s.cbInstrBuf))
2396 {
2397 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2398 return pbBuf[offBuf];
2399 }
2400# else
2401 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2402 pVCpu->iem.s.offModRm = offOpcode;
2403 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2404 {
2405 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2406 return pVCpu->iem.s.abOpcode[offOpcode];
2407 }
2408# endif
2409 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2410}
2411#endif /* IEM_WITH_SETJMP */
2412
2413/**
2414 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2415 * on failure.
2416 *
2417 * Will note down the position of the ModR/M byte for VT-x exits.
2418 *
2419 * @param a_pbRm Where to return the RM opcode byte.
2420 * @remark Implicitly references pVCpu.
2421 */
2422#ifndef IEM_WITH_SETJMP
2423# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2424 do \
2425 { \
2426 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2427 if (rcStrict2 == VINF_SUCCESS) \
2428 { /* likely */ } \
2429 else \
2430 return rcStrict2; \
2431 } while (0)
2432#else
2433# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2434#endif /* IEM_WITH_SETJMP */
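/*
 * Usage sketch (illustrative): the ModR/M byte is fetched through
 * IEM_OPCODE_GET_NEXT_RM rather than IEM_OPCODE_GET_NEXT_U8 purely so that
 * offModRm records where the byte sits in the opcode buffer:
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_RM(&bRm);
 *      (then decode the register/memory form from bRm as usual)
 */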
2435
2436
2437#ifndef IEM_WITH_SETJMP
2438
2439/**
2440 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2441 *
2442 * @returns Strict VBox status code.
2443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2444 * @param pu16 Where to return the opcode word.
2445 */
2446DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2447{
2448 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2449 if (rcStrict == VINF_SUCCESS)
2450 {
2451 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2452# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2453 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2454# else
2455 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2456# endif
2457 pVCpu->iem.s.offOpcode = offOpcode + 2;
2458 }
2459 else
2460 *pu16 = 0;
2461 return rcStrict;
2462}
2463
2464
2465/**
2466 * Fetches the next opcode word.
2467 *
2468 * @returns Strict VBox status code.
2469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2470 * @param pu16 Where to return the opcode word.
2471 */
2472DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
2473{
2474 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2475 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2476 {
2477 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2478# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2479 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2480# else
2481 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2482# endif
2483 return VINF_SUCCESS;
2484 }
2485 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2486}
2487
2488#else /* IEM_WITH_SETJMP */
2489
2490/**
2491 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2492 *
2493 * @returns The opcode word.
2494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2495 */
2496DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
2497{
2498# ifdef IEM_WITH_CODE_TLB
2499 uint16_t u16;
2500 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2501 return u16;
2502# else
2503 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2504 if (rcStrict == VINF_SUCCESS)
2505 {
2506 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2507 pVCpu->iem.s.offOpcode += 2;
2508# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2509 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2510# else
2511 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2512# endif
2513 }
2514 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2515# endif
2516}
2517
2518
2519/**
2520 * Fetches the next opcode word, longjmp on error.
2521 *
2522 * @returns The opcode word.
2523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2524 */
2525DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
2526{
2527# ifdef IEM_WITH_CODE_TLB
2528 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2529 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2530 if (RT_LIKELY( pbBuf != NULL
2531 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2532 {
2533 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2534# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2535 return *(uint16_t const *)&pbBuf[offBuf];
2536# else
2537 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2538# endif
2539 }
2540# else
2541 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2542 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2543 {
2544 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2545# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2546 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2547# else
2548 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2549# endif
2550 }
2551# endif
2552 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2553}
2554
2555#endif /* IEM_WITH_SETJMP */
2556
2557
2558/**
2559 * Fetches the next opcode word, returns automatically on failure.
2560 *
2561 * @param a_pu16 Where to return the opcode word.
2562 * @remark Implicitly references pVCpu.
2563 */
2564#ifndef IEM_WITH_SETJMP
2565# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2566 do \
2567 { \
2568 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2569 if (rcStrict2 != VINF_SUCCESS) \
2570 return rcStrict2; \
2571 } while (0)
2572#else
2573# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2574#endif
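/*
 * Usage sketch (illustrative): 16-bit immediates and displacements, e.g. the
 * imm16 of 'mov ax, imm16' or a disp16 in 16-bit addressing, are consumed as:
 *
 *      uint16_t u16Imm;
 *      IEM_OPCODE_GET_NEXT_U16(&u16Imm);
 */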
2575
2576#ifndef IEM_WITH_SETJMP
2577
2578/**
2579 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2580 *
2581 * @returns Strict VBox status code.
2582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2583 * @param pu32 Where to return the opcode double word.
2584 */
2585DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2586{
2587 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2588 if (rcStrict == VINF_SUCCESS)
2589 {
2590 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2591 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2592 pVCpu->iem.s.offOpcode = offOpcode + 2;
2593 }
2594 else
2595 *pu32 = 0;
2596 return rcStrict;
2597}
2598
2599
2600/**
2601 * Fetches the next opcode word, zero extending it to a double word.
2602 *
2603 * @returns Strict VBox status code.
2604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2605 * @param pu32 Where to return the opcode double word.
2606 */
2607DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2608{
2609 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2610 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2611 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2612
2613 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2614 pVCpu->iem.s.offOpcode = offOpcode + 2;
2615 return VINF_SUCCESS;
2616}
2617
2618#endif /* !IEM_WITH_SETJMP */
2619
2620
2621/**
2622 * Fetches the next opcode word and zero extends it to a double word, returns
2623 * automatically on failure.
2624 *
2625 * @param a_pu32 Where to return the opcode double word.
2626 * @remark Implicitly references pVCpu.
2627 */
2628#ifndef IEM_WITH_SETJMP
2629# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2630 do \
2631 { \
2632 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2633 if (rcStrict2 != VINF_SUCCESS) \
2634 return rcStrict2; \
2635 } while (0)
2636#else
2637# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2638#endif
2639
2640#ifndef IEM_WITH_SETJMP
2641
2642/**
2643 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2644 *
2645 * @returns Strict VBox status code.
2646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2647 * @param pu64 Where to return the opcode quad word.
2648 */
2649DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2650{
2651 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2652 if (rcStrict == VINF_SUCCESS)
2653 {
2654 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2655 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2656 pVCpu->iem.s.offOpcode = offOpcode + 2;
2657 }
2658 else
2659 *pu64 = 0;
2660 return rcStrict;
2661}
2662
2663
2664/**
2665 * Fetches the next opcode word, zero extending it to a quad word.
2666 *
2667 * @returns Strict VBox status code.
2668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2669 * @param pu64 Where to return the opcode quad word.
2670 */
2671DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2672{
2673 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2674 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2675 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2676
2677 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2678 pVCpu->iem.s.offOpcode = offOpcode + 2;
2679 return VINF_SUCCESS;
2680}
2681
2682#endif /* !IEM_WITH_SETJMP */
2683
2684/**
2685 * Fetches the next opcode word and zero extends it to a quad word, returns
2686 * automatically on failure.
2687 *
2688 * @param a_pu64 Where to return the opcode quad word.
2689 * @remark Implicitly references pVCpu.
2690 */
2691#ifndef IEM_WITH_SETJMP
2692# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2693 do \
2694 { \
2695 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2696 if (rcStrict2 != VINF_SUCCESS) \
2697 return rcStrict2; \
2698 } while (0)
2699#else
2700# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2701#endif
2702
2703
2704#ifndef IEM_WITH_SETJMP
2705/**
2706 * Fetches the next signed word from the opcode stream.
2707 *
2708 * @returns Strict VBox status code.
2709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2710 * @param pi16 Where to return the signed word.
2711 */
2712DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
2713{
2714 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2715}
2716#endif /* !IEM_WITH_SETJMP */
2717
2718
2719/**
2720 * Fetches the next signed word from the opcode stream, returning automatically
2721 * on failure.
2722 *
2723 * @param a_pi16 Where to return the signed word.
2724 * @remark Implicitly references pVCpu.
2725 */
2726#ifndef IEM_WITH_SETJMP
2727# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2728 do \
2729 { \
2730 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2731 if (rcStrict2 != VINF_SUCCESS) \
2732 return rcStrict2; \
2733 } while (0)
2734#else
2735# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2736#endif
2737
2738#ifndef IEM_WITH_SETJMP
2739
2740/**
2741 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2742 *
2743 * @returns Strict VBox status code.
2744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2745 * @param pu32 Where to return the opcode dword.
2746 */
2747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2748{
2749 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2750 if (rcStrict == VINF_SUCCESS)
2751 {
2752 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2753# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2754 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2755# else
2756 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2757 pVCpu->iem.s.abOpcode[offOpcode + 1],
2758 pVCpu->iem.s.abOpcode[offOpcode + 2],
2759 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2760# endif
2761 pVCpu->iem.s.offOpcode = offOpcode + 4;
2762 }
2763 else
2764 *pu32 = 0;
2765 return rcStrict;
2766}
2767
2768
2769/**
2770 * Fetches the next opcode dword.
2771 *
2772 * @returns Strict VBox status code.
2773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2774 * @param pu32 Where to return the opcode double word.
2775 */
2776DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
2777{
2778 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2779 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2780 {
2781 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2782# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2783 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2784# else
2785 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2786 pVCpu->iem.s.abOpcode[offOpcode + 1],
2787 pVCpu->iem.s.abOpcode[offOpcode + 2],
2788 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2789# endif
2790 return VINF_SUCCESS;
2791 }
2792 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2793}
2794
2795#else /* IEM_WITH_SETJMP */
2796
2797/**
2798 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2799 *
2800 * @returns The opcode dword.
2801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2802 */
2803DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
2804{
2805# ifdef IEM_WITH_CODE_TLB
2806 uint32_t u32;
2807 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2808 return u32;
2809# else
2810 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2811 if (rcStrict == VINF_SUCCESS)
2812 {
2813 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2814 pVCpu->iem.s.offOpcode = offOpcode + 4;
2815# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2816 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2817# else
2818 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2819 pVCpu->iem.s.abOpcode[offOpcode + 1],
2820 pVCpu->iem.s.abOpcode[offOpcode + 2],
2821 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2822# endif
2823 }
2824 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2825# endif
2826}
2827
2828
2829/**
2830 * Fetches the next opcode dword, longjmp on error.
2831 *
2832 * @returns The opcode dword.
2833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2834 */
2835DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
2836{
2837# ifdef IEM_WITH_CODE_TLB
2838 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2839 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2840 if (RT_LIKELY( pbBuf != NULL
2841 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2842 {
2843 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2844# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2845 return *(uint32_t const *)&pbBuf[offBuf];
2846# else
2847 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2848 pbBuf[offBuf + 1],
2849 pbBuf[offBuf + 2],
2850 pbBuf[offBuf + 3]);
2851# endif
2852 }
2853# else
2854 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2855 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2856 {
2857 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2858# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2859 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2860# else
2861 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2862 pVCpu->iem.s.abOpcode[offOpcode + 1],
2863 pVCpu->iem.s.abOpcode[offOpcode + 2],
2864 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2865# endif
2866 }
2867# endif
2868 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2869}
2870
2871#endif /* IEM_WITH_SETJMP */
2872
2873
2874/**
2875 * Fetches the next opcode dword, returns automatically on failure.
2876 *
2877 * @param a_pu32 Where to return the opcode dword.
2878 * @remark Implicitly references pVCpu.
2879 */
2880#ifndef IEM_WITH_SETJMP
2881# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2882 do \
2883 { \
2884 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2885 if (rcStrict2 != VINF_SUCCESS) \
2886 return rcStrict2; \
2887 } while (0)
2888#else
2889# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2890#endif
2891
2892#ifndef IEM_WITH_SETJMP
2893
2894/**
2895 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2896 *
2897 * @returns Strict VBox status code.
2898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2899 * @param pu64 Where to return the opcode quad word.
2900 */
2901DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2902{
2903 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2904 if (rcStrict == VINF_SUCCESS)
2905 {
2906 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2907 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2908 pVCpu->iem.s.abOpcode[offOpcode + 1],
2909 pVCpu->iem.s.abOpcode[offOpcode + 2],
2910 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2911 pVCpu->iem.s.offOpcode = offOpcode + 4;
2912 }
2913 else
2914 *pu64 = 0;
2915 return rcStrict;
2916}
2917
2918
2919/**
2920 * Fetches the next opcode dword, zero extending it to a quad word.
2921 *
2922 * @returns Strict VBox status code.
2923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2924 * @param pu64 Where to return the opcode quad word.
2925 */
2926DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2927{
2928 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2929 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2930 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2931
2932 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2933 pVCpu->iem.s.abOpcode[offOpcode + 1],
2934 pVCpu->iem.s.abOpcode[offOpcode + 2],
2935 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2936 pVCpu->iem.s.offOpcode = offOpcode + 4;
2937 return VINF_SUCCESS;
2938}
2939
2940#endif /* !IEM_WITH_SETJMP */
2941
2942
2943/**
2944 * Fetches the next opcode dword and zero extends it to a quad word, returns
2945 * automatically on failure.
2946 *
2947 * @param a_pu64 Where to return the opcode quad word.
2948 * @remark Implicitly references pVCpu.
2949 */
2950#ifndef IEM_WITH_SETJMP
2951# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2952 do \
2953 { \
2954 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2955 if (rcStrict2 != VINF_SUCCESS) \
2956 return rcStrict2; \
2957 } while (0)
2958#else
2959# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2960#endif
2961
2962
2963#ifndef IEM_WITH_SETJMP
2964/**
2965 * Fetches the next signed double word from the opcode stream.
2966 *
2967 * @returns Strict VBox status code.
2968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2969 * @param pi32 Where to return the signed double word.
2970 */
2971DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
2972{
2973 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2974}
2975#endif
2976
2977/**
2978 * Fetches the next signed double word from the opcode stream, returning
2979 * automatically on failure.
2980 *
2981 * @param a_pi32 Where to return the signed double word.
2982 * @remark Implicitly references pVCpu.
2983 */
2984#ifndef IEM_WITH_SETJMP
2985# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2986 do \
2987 { \
2988 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2989 if (rcStrict2 != VINF_SUCCESS) \
2990 return rcStrict2; \
2991 } while (0)
2992#else
2993# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2994#endif
2995
2996#ifndef IEM_WITH_SETJMP
2997
2998/**
2999 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3000 *
3001 * @returns Strict VBox status code.
3002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3003 * @param pu64 Where to return the opcode qword.
3004 */
3005DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3006{
3007 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3008 if (rcStrict == VINF_SUCCESS)
3009 {
3010 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3011 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3012 pVCpu->iem.s.abOpcode[offOpcode + 1],
3013 pVCpu->iem.s.abOpcode[offOpcode + 2],
3014 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3015 pVCpu->iem.s.offOpcode = offOpcode + 4;
3016 }
3017 else
3018 *pu64 = 0;
3019 return rcStrict;
3020}
3021
3022
3023/**
3024 * Fetches the next opcode dword, sign extending it into a quad word.
3025 *
3026 * @returns Strict VBox status code.
3027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3028 * @param pu64 Where to return the opcode quad word.
3029 */
3030DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3031{
3032 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3033 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3034 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3035
3036 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3037 pVCpu->iem.s.abOpcode[offOpcode + 1],
3038 pVCpu->iem.s.abOpcode[offOpcode + 2],
3039 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3040 *pu64 = i32;
3041 pVCpu->iem.s.offOpcode = offOpcode + 4;
3042 return VINF_SUCCESS;
3043}
3044
3045#endif /* !IEM_WITH_SETJMP */
3046
3047
3048/**
3049 * Fetches the next opcode double word and sign extends it to a quad word,
3050 * returns automatically on failure.
3051 *
3052 * @param a_pu64 Where to return the opcode quad word.
3053 * @remark Implicitly references pVCpu.
3054 */
3055#ifndef IEM_WITH_SETJMP
3056# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3057 do \
3058 { \
3059 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3060 if (rcStrict2 != VINF_SUCCESS) \
3061 return rcStrict2; \
3062 } while (0)
3063#else
3064# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3065#endif
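/*
 * Worked example (illustrative): in 64-bit mode most instructions take at most
 * a 32-bit immediate which the CPU sign-extends to 64 bits, e.g. an imm32 of
 * 0x80000000 must become 0xffffffff80000000 in the operand.  That is what
 * IEM_OPCODE_GET_NEXT_S32_SX_U64 provides, while the U32_ZX_U64 variant above
 * is for the cases where zero extension is wanted instead.
 */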
3066
3067#ifndef IEM_WITH_SETJMP
3068
3069/**
3070 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3071 *
3072 * @returns Strict VBox status code.
3073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3074 * @param pu64 Where to return the opcode qword.
3075 */
3076DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3077{
3078 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3079 if (rcStrict == VINF_SUCCESS)
3080 {
3081 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3082# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3083 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3084# else
3085 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3086 pVCpu->iem.s.abOpcode[offOpcode + 1],
3087 pVCpu->iem.s.abOpcode[offOpcode + 2],
3088 pVCpu->iem.s.abOpcode[offOpcode + 3],
3089 pVCpu->iem.s.abOpcode[offOpcode + 4],
3090 pVCpu->iem.s.abOpcode[offOpcode + 5],
3091 pVCpu->iem.s.abOpcode[offOpcode + 6],
3092 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3093# endif
3094 pVCpu->iem.s.offOpcode = offOpcode + 8;
3095 }
3096 else
3097 *pu64 = 0;
3098 return rcStrict;
3099}
3100
3101
3102/**
3103 * Fetches the next opcode qword.
3104 *
3105 * @returns Strict VBox status code.
3106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3107 * @param pu64 Where to return the opcode qword.
3108 */
3109DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3110{
3111 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3112 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3113 {
3114# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3115 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3116# else
3117 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3118 pVCpu->iem.s.abOpcode[offOpcode + 1],
3119 pVCpu->iem.s.abOpcode[offOpcode + 2],
3120 pVCpu->iem.s.abOpcode[offOpcode + 3],
3121 pVCpu->iem.s.abOpcode[offOpcode + 4],
3122 pVCpu->iem.s.abOpcode[offOpcode + 5],
3123 pVCpu->iem.s.abOpcode[offOpcode + 6],
3124 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3125# endif
3126 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3127 return VINF_SUCCESS;
3128 }
3129 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3130}
3131
3132#else /* IEM_WITH_SETJMP */
3133
3134/**
3135 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3136 *
3137 * @returns The opcode qword.
3138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3139 */
3140DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3141{
3142# ifdef IEM_WITH_CODE_TLB
3143 uint64_t u64;
3144 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3145 return u64;
3146# else
3147 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3148 if (rcStrict == VINF_SUCCESS)
3149 {
3150 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3151 pVCpu->iem.s.offOpcode = offOpcode + 8;
3152# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3153 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3154# else
3155 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3156 pVCpu->iem.s.abOpcode[offOpcode + 1],
3157 pVCpu->iem.s.abOpcode[offOpcode + 2],
3158 pVCpu->iem.s.abOpcode[offOpcode + 3],
3159 pVCpu->iem.s.abOpcode[offOpcode + 4],
3160 pVCpu->iem.s.abOpcode[offOpcode + 5],
3161 pVCpu->iem.s.abOpcode[offOpcode + 6],
3162 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3163# endif
3164 }
3165 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3166# endif
3167}
3168
3169
3170/**
3171 * Fetches the next opcode qword, longjmp on error.
3172 *
3173 * @returns The opcode qword.
3174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3175 */
3176DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3177{
3178# ifdef IEM_WITH_CODE_TLB
3179 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3180 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3181 if (RT_LIKELY( pbBuf != NULL
3182 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3183 {
3184 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3185# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3186 return *(uint64_t const *)&pbBuf[offBuf];
3187# else
3188 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3189 pbBuf[offBuf + 1],
3190 pbBuf[offBuf + 2],
3191 pbBuf[offBuf + 3],
3192 pbBuf[offBuf + 4],
3193 pbBuf[offBuf + 5],
3194 pbBuf[offBuf + 6],
3195 pbBuf[offBuf + 7]);
3196# endif
3197 }
3198# else
3199 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3200 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3201 {
3202 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3203# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3204 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3205# else
3206 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3207 pVCpu->iem.s.abOpcode[offOpcode + 1],
3208 pVCpu->iem.s.abOpcode[offOpcode + 2],
3209 pVCpu->iem.s.abOpcode[offOpcode + 3],
3210 pVCpu->iem.s.abOpcode[offOpcode + 4],
3211 pVCpu->iem.s.abOpcode[offOpcode + 5],
3212 pVCpu->iem.s.abOpcode[offOpcode + 6],
3213 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3214# endif
3215 }
3216# endif
3217 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3218}
3219
3220#endif /* IEM_WITH_SETJMP */
3221
3222/**
3223 * Fetches the next opcode quad word, returns automatically on failure.
3224 *
3225 * @param a_pu64 Where to return the opcode quad word.
3226 * @remark Implicitly references pVCpu.
3227 */
3228#ifndef IEM_WITH_SETJMP
3229# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3230 do \
3231 { \
3232 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3233 if (rcStrict2 != VINF_SUCCESS) \
3234 return rcStrict2; \
3235 } while (0)
3236#else
3237# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3238#endif
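/* Usage sketch (editor's illustration, not part of the build): fetching the
 * 8-byte immediate of e.g. a 64-bit 'mov reg, imm64' could look like
 *      uint64_t u64Imm;                    // hypothetical local
 *      IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 * The slow paths (iemOpcodeGetNextU64Slow / iemOpcodeGetNextU64SlowJmp) only
 * kick in when fewer than 8 opcode bytes are currently buffered. */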
3239
3240
3241/** @name Misc Worker Functions.
3242 * @{
3243 */
3244
3245/**
3246 * Gets the exception class for the specified exception vector.
3247 *
3248 * @returns The class of the specified exception.
3249 * @param uVector The exception vector.
3250 */
3251IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3252{
3253 Assert(uVector <= X86_XCPT_LAST);
3254 switch (uVector)
3255 {
3256 case X86_XCPT_DE:
3257 case X86_XCPT_TS:
3258 case X86_XCPT_NP:
3259 case X86_XCPT_SS:
3260 case X86_XCPT_GP:
3261 case X86_XCPT_SX: /* AMD only */
3262 return IEMXCPTCLASS_CONTRIBUTORY;
3263
3264 case X86_XCPT_PF:
3265 case X86_XCPT_VE: /* Intel only */
3266 return IEMXCPTCLASS_PAGE_FAULT;
3267
3268 case X86_XCPT_DF:
3269 return IEMXCPTCLASS_DOUBLE_FAULT;
3270 }
3271 return IEMXCPTCLASS_BENIGN;
3272}
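/* Editor's note (illustrative): with the table above a #GP (vector 13) is
 * classified as contributory, #PF (14) as page fault and #DF (8) as double
 * fault, while vectors not listed, e.g. #UD (6) or #NM (7), fall through as
 * benign. */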
3273
3274
3275/**
3276 * Evaluates how to handle an exception caused during delivery of another event
3277 * (exception / interrupt).
3278 *
3279 * @returns How to handle the recursive exception.
3280 * @param pVCpu The cross context virtual CPU structure of the
3281 * calling thread.
3282 * @param fPrevFlags The flags of the previous event.
3283 * @param uPrevVector The vector of the previous event.
3284 * @param fCurFlags The flags of the current exception.
3285 * @param uCurVector The vector of the current exception.
3286 * @param pfXcptRaiseInfo Where to store additional information about the
3287 * exception condition. Optional.
3288 */
3289VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3290 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3291{
3292 /*
3293 * Only CPU exceptions can be raised while delivering other events; software interrupt
3294 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3295 */
3296 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3297 Assert(pVCpu); RT_NOREF(pVCpu);
3298 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3299
3300 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3301 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3302 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3303 {
3304 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3305 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3306 {
3307 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3308 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3309 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3310 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3311 {
3312 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3313 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3314 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3315 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3316 uCurVector, pVCpu->cpum.GstCtx.cr2));
3317 }
3318 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3319 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3320 {
3321 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3322 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3323 }
3324 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3325 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3326 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3327 {
3328 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3329 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3330 }
3331 }
3332 else
3333 {
3334 if (uPrevVector == X86_XCPT_NMI)
3335 {
3336 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3337 if (uCurVector == X86_XCPT_PF)
3338 {
3339 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3340 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3341 }
3342 }
3343 else if ( uPrevVector == X86_XCPT_AC
3344 && uCurVector == X86_XCPT_AC)
3345 {
3346 enmRaise = IEMXCPTRAISE_CPU_HANG;
3347 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3348 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3349 }
3350 }
3351 }
3352 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3353 {
3354 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3355 if (uCurVector == X86_XCPT_PF)
3356 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3357 }
3358 else
3359 {
3360 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3361 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3362 }
3363
3364 if (pfXcptRaiseInfo)
3365 *pfXcptRaiseInfo = fRaiseInfo;
3366 return enmRaise;
3367}
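/* Worked example (editor's illustration): a #GP raised while delivering a #PF
 * hits the page-fault/contributory branch above and yields
 * IEMXCPTRAISE_DOUBLE_FAULT with IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT, whereas
 * a #PF raised while delivering an external interrupt keeps
 * IEMXCPTRAISE_CURRENT_XCPT and merely sets IEMXCPTRAISEINFO_EXT_INT_XCPT and
 * IEMXCPTRAISEINFO_EXT_INT_PF in *pfXcptRaiseInfo. */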
3368
3369
3370/**
3371 * Enters the CPU shutdown state initiated by a triple fault or other
3372 * unrecoverable conditions.
3373 *
3374 * @returns Strict VBox status code.
3375 * @param pVCpu The cross context virtual CPU structure of the
3376 * calling thread.
3377 */
3378IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3379{
3380 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3381 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3382
3383 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3384 {
3385 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3386 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3387 }
3388
3389 RT_NOREF(pVCpu);
3390 return VINF_EM_TRIPLE_FAULT;
3391}
3392
3393
3394/**
3395 * Validates a new SS segment.
3396 *
3397 * @returns VBox strict status code.
3398 * @param pVCpu The cross context virtual CPU structure of the
3399 * calling thread.
3400 * @param NewSS The new SS selector.
3401 * @param uCpl The CPL to load the stack for.
3402 * @param pDesc Where to return the descriptor.
3403 */
3404IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3405{
3406 /* Null selectors are not allowed (we're not called for dispatching
3407 interrupts with SS=0 in long mode). */
3408 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3409 {
3410 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3411 return iemRaiseTaskSwitchFault0(pVCpu);
3412 }
3413
3414 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3415 if ((NewSS & X86_SEL_RPL) != uCpl)
3416 {
3417 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3418 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3419 }
3420
3421 /*
3422 * Read the descriptor.
3423 */
3424 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3425 if (rcStrict != VINF_SUCCESS)
3426 return rcStrict;
3427
3428 /*
3429 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3430 */
3431 if (!pDesc->Legacy.Gen.u1DescType)
3432 {
3433 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3434 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3435 }
3436
3437 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3438 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3439 {
3440 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3441 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3442 }
3443 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3444 {
3445 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3446 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3447 }
3448
3449 /* Is it there? */
3450 /** @todo testcase: Is this checked before the canonical / limit check below? */
3451 if (!pDesc->Legacy.Gen.u1Present)
3452 {
3453 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3454 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3455 }
3456
3457 return VINF_SUCCESS;
3458}
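/* Caller sketch (editor's illustration; NewSS, uNewCpl and DescSS are
 * hypothetical locals): stack-loading paths typically do
 *      IEMSELDESC DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 * and only then commit DescSS into the hidden parts of SS. */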
3459
3460
3461/**
3462 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3463 * not (kind of obsolete now).
3464 *
3465 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3466 */
3467#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3468
3469/**
3470 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
3471 *
3472 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3473 * @param a_fEfl The new EFLAGS.
3474 */
3475#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3476
3477/** @} */
3478
3479
3480/** @name Raising Exceptions.
3481 *
3482 * @{
3483 */
3484
3485
3486/**
3487 * Loads the specified stack far pointer from the TSS.
3488 *
3489 * @returns VBox strict status code.
3490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3491 * @param uCpl The CPL to load the stack for.
3492 * @param pSelSS Where to return the new stack segment.
3493 * @param puEsp Where to return the new stack pointer.
3494 */
3495IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3496{
3497 VBOXSTRICTRC rcStrict;
3498 Assert(uCpl < 4);
3499
3500 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3501 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3502 {
3503 /*
3504 * 16-bit TSS (X86TSS16).
3505 */
3506 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3507 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3508 {
3509 uint32_t off = uCpl * 4 + 2;
3510 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3511 {
3512 /** @todo check actual access pattern here. */
3513 uint32_t u32Tmp = 0; /* gcc maybe... */
3514 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3515 if (rcStrict == VINF_SUCCESS)
3516 {
3517 *puEsp = RT_LOWORD(u32Tmp);
3518 *pSelSS = RT_HIWORD(u32Tmp);
3519 return VINF_SUCCESS;
3520 }
3521 }
3522 else
3523 {
3524 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3525 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3526 }
3527 break;
3528 }
3529
3530 /*
3531 * 32-bit TSS (X86TSS32).
3532 */
3533 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3534 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3535 {
3536 uint32_t off = uCpl * 8 + 4;
3537 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3538 {
3539/** @todo check actual access pattern here. */
3540 uint64_t u64Tmp;
3541 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3542 if (rcStrict == VINF_SUCCESS)
3543 {
3544 *puEsp = u64Tmp & UINT32_MAX;
3545 *pSelSS = (RTSEL)(u64Tmp >> 32);
3546 return VINF_SUCCESS;
3547 }
3548 }
3549 else
3550 {
3551 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3552 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3553 }
3554 break;
3555 }
3556
3557 default:
3558 AssertFailed();
3559 rcStrict = VERR_IEM_IPE_4;
3560 break;
3561 }
3562
3563 *puEsp = 0; /* make gcc happy */
3564 *pSelSS = 0; /* make gcc happy */
3565 return rcStrict;
3566}
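/* Worked example (editor's illustration): for a 32-bit TSS and uCpl=1 the code
 * above computes off = 1 * 8 + 4 = 12, i.e. the esp1/ss1 pair, taking the low
 * dword of the fetched qword as *puEsp and bits 32..47 as *pSelSS.  The 16-bit
 * variant reads the sp/ss word pair at uCpl * 4 + 2 instead. */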
3567
3568
3569/**
3570 * Loads the specified stack pointer from the 64-bit TSS.
3571 *
3572 * @returns VBox strict status code.
3573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3574 * @param uCpl The CPL to load the stack for.
3575 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3576 * @param puRsp Where to return the new stack pointer.
3577 */
3578IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3579{
3580 Assert(uCpl < 4);
3581 Assert(uIst < 8);
3582 *puRsp = 0; /* make gcc happy */
3583
3584 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3585 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3586
3587 uint32_t off;
3588 if (uIst)
3589 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3590 else
3591 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3592 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3593 {
3594 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3595 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3596 }
3597
3598 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3599}
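/* Worked example (editor's illustration): with uIst=0 and uCpl=2 the offset
 * resolves to RT_UOFFSETOF(X86TSS64, rsp0) + 2 * 8, i.e. the rsp2 field; with
 * uIst=3 it resolves to RT_UOFFSETOF(X86TSS64, ist1) + 2 * 8, i.e. ist3.
 * Either way a single 8-byte system read at TR.base + off produces *puRsp. */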
3600
3601
3602/**
3603 * Adjust the CPU state according to the exception being raised.
3604 *
3605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3606 * @param u8Vector The exception that has been raised.
3607 */
3608DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
3609{
3610 switch (u8Vector)
3611 {
3612 case X86_XCPT_DB:
3613 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3614 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3615 break;
3616 /** @todo Read the AMD and Intel exception reference... */
3617 }
3618}
3619
3620
3621/**
3622 * Implements exceptions and interrupts for real mode.
3623 *
3624 * @returns VBox strict status code.
3625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3626 * @param cbInstr The number of bytes to offset rIP by in the return
3627 * address.
3628 * @param u8Vector The interrupt / exception vector number.
3629 * @param fFlags The flags.
3630 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3631 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3632 */
3633IEM_STATIC VBOXSTRICTRC
3634iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
3635 uint8_t cbInstr,
3636 uint8_t u8Vector,
3637 uint32_t fFlags,
3638 uint16_t uErr,
3639 uint64_t uCr2)
3640{
3641 NOREF(uErr); NOREF(uCr2);
3642 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3643
3644 /*
3645 * Read the IDT entry.
3646 */
3647 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3648 {
3649 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3650 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3651 }
3652 RTFAR16 Idte;
3653 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3654 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3655 {
3656 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3657 return rcStrict;
3658 }
3659
3660 /*
3661 * Push the stack frame.
3662 */
3663 uint16_t *pu16Frame;
3664 uint64_t uNewRsp;
3665 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3666 if (rcStrict != VINF_SUCCESS)
3667 return rcStrict;
3668
3669 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3670#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3671 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3672 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3673 fEfl |= UINT16_C(0xf000);
3674#endif
3675 pu16Frame[2] = (uint16_t)fEfl;
3676 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3677 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3678 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3679 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3680 return rcStrict;
3681
3682 /*
3683 * Load the vector address into cs:ip and make exception specific state
3684 * adjustments.
3685 */
3686 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3687 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3688 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3689 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3690 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3691 pVCpu->cpum.GstCtx.rip = Idte.off;
3692 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3693 IEMMISC_SET_EFL(pVCpu, fEfl);
3694
3695 /** @todo do we actually do this in real mode? */
3696 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3697 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3698
3699 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3700}
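/* Worked example (editor's illustration): raising INT 10h here reads the
 * 4-byte IVT entry at idtr.pIdt + 4 * 0x10, pushes FLAGS, CS and the return IP
 * (IP + cbInstr for software interrupts) as three words, clears IF/TF/AC and
 * resumes at Idte.sel:Idte.off with CS.u64Base = Idte.sel << 4. */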
3701
3702
3703/**
3704 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3705 *
3706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3707 * @param pSReg Pointer to the segment register.
3708 */
3709IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
3710{
3711 pSReg->Sel = 0;
3712 pSReg->ValidSel = 0;
3713 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3714 {
3715 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3716 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3717 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3718 }
3719 else
3720 {
3721 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3722 /** @todo check this on AMD-V */
3723 pSReg->u64Base = 0;
3724 pSReg->u32Limit = 0;
3725 }
3726}
3727
3728
3729/**
3730 * Loads a segment selector during a task switch in V8086 mode.
3731 *
3732 * @param pSReg Pointer to the segment register.
3733 * @param uSel The selector value to load.
3734 */
3735IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3736{
3737 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3738 pSReg->Sel = uSel;
3739 pSReg->ValidSel = uSel;
3740 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3741 pSReg->u64Base = uSel << 4;
3742 pSReg->u32Limit = 0xffff;
3743 pSReg->Attr.u = 0xf3;
3744}
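/* Worked example (editor's illustration): loading uSel=0x2345 here yields
 * Sel=ValidSel=0x2345, u64Base=0x23450, u32Limit=0xffff and Attr.u=0xf3
 * (present, DPL=3, accessed read/write data), i.e. the fixed V8086 segment
 * shape the Intel checks referenced above expect. */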
3745
3746
3747/**
3748 * Loads a NULL data selector into a selector register, both the hidden and
3749 * visible parts, in protected mode.
3750 *
3751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3752 * @param pSReg Pointer to the segment register.
3753 * @param uRpl The RPL.
3754 */
3755IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3756{
3757 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3758 * data selector in protected mode. */
3759 pSReg->Sel = uRpl;
3760 pSReg->ValidSel = uRpl;
3761 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3762 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3763 {
3764 /* VT-x (Intel 3960x) observed doing something like this. */
3765 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3766 pSReg->u32Limit = UINT32_MAX;
3767 pSReg->u64Base = 0;
3768 }
3769 else
3770 {
3771 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3772 pSReg->u32Limit = 0;
3773 pSReg->u64Base = 0;
3774 }
3775}
3776
3777
3778/**
3779 * Loads a segment selector during a task switch in protected mode.
3780 *
3781 * In this task switch scenario, we would throw \#TS exceptions rather than
3782 * \#GPs.
3783 *
3784 * @returns VBox strict status code.
3785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3786 * @param pSReg Pointer to the segment register.
3787 * @param uSel The new selector value.
3788 *
3789 * @remarks This does _not_ handle CS or SS.
3790 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3791 */
3792IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3793{
3794 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3795
3796 /* Null data selector. */
3797 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3798 {
3799 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3800 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3801 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3802 return VINF_SUCCESS;
3803 }
3804
3805 /* Fetch the descriptor. */
3806 IEMSELDESC Desc;
3807 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3808 if (rcStrict != VINF_SUCCESS)
3809 {
3810 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3811 VBOXSTRICTRC_VAL(rcStrict)));
3812 return rcStrict;
3813 }
3814
3815 /* Must be a data segment or readable code segment. */
3816 if ( !Desc.Legacy.Gen.u1DescType
3817 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3818 {
3819 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3820 Desc.Legacy.Gen.u4Type));
3821 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3822 }
3823
3824 /* Check privileges for data segments and non-conforming code segments. */
3825 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3826 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3827 {
3828 /* The RPL and the new CPL must be less than or equal to the DPL. */
3829 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3830 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3831 {
3832 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3833 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3834 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3835 }
3836 }
3837
3838 /* Is it there? */
3839 if (!Desc.Legacy.Gen.u1Present)
3840 {
3841 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3842 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3843 }
3844
3845 /* The base and limit. */
3846 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3847 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3848
3849 /*
3850 * Ok, everything checked out fine. Now set the accessed bit before
3851 * committing the result into the registers.
3852 */
3853 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3854 {
3855 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3856 if (rcStrict != VINF_SUCCESS)
3857 return rcStrict;
3858 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3859 }
3860
3861 /* Commit */
3862 pSReg->Sel = uSel;
3863 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3864 pSReg->u32Limit = cbLimit;
3865 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3866 pSReg->ValidSel = uSel;
3867 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3868 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3869 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3870
3871 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3872 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3873 return VINF_SUCCESS;
3874}
3875
3876
3877/**
3878 * Performs a task switch.
3879 *
3880 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3881 * caller is responsible for performing the necessary checks (like DPL, TSS
3882 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3883 * reference for JMP, CALL, IRET.
3884 *
3885 * If the task switch is due to a software interrupt or hardware exception,
3886 * the caller is responsible for validating the TSS selector and descriptor. See
3887 * Intel Instruction reference for INT n.
3888 *
3889 * @returns VBox strict status code.
3890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3891 * @param enmTaskSwitch The cause of the task switch.
3892 * @param uNextEip The EIP effective after the task switch.
3893 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3894 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3895 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3896 * @param SelTSS The TSS selector of the new task.
3897 * @param pNewDescTSS Pointer to the new TSS descriptor.
3898 */
3899IEM_STATIC VBOXSTRICTRC
3900iemTaskSwitch(PVMCPUCC pVCpu,
3901 IEMTASKSWITCH enmTaskSwitch,
3902 uint32_t uNextEip,
3903 uint32_t fFlags,
3904 uint16_t uErr,
3905 uint64_t uCr2,
3906 RTSEL SelTSS,
3907 PIEMSELDESC pNewDescTSS)
3908{
3909 Assert(!IEM_IS_REAL_MODE(pVCpu));
3910 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3911 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3912
3913 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3914 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3915 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3916 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3917 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3918
3919 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3920 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3921
3922 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3923 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3924
3925 /* Update CR2 in case it's a page-fault. */
3926 /** @todo This should probably be done much earlier in IEM/PGM. See
3927 * @bugref{5653#c49}. */
3928 if (fFlags & IEM_XCPT_FLAGS_CR2)
3929 pVCpu->cpum.GstCtx.cr2 = uCr2;
3930
3931 /*
3932 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3933 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3934 */
3935 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3936 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3937 if (uNewTSSLimit < uNewTSSLimitMin)
3938 {
3939 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3940 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3941 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3942 }
3943
3944 /*
3945 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
3946 * The new TSS must have been read and validated (DPL, limits etc.) before a
3947 * task-switch VM-exit commences.
3948 *
3949 * See Intel spec. 25.4.2 "Treatment of Task Switches".
3950 */
3951 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3952 {
3953 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
3954 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
3955 }
3956
3957 /*
3958 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3959 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3960 */
3961 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3962 {
3963 uint32_t const uExitInfo1 = SelTSS;
3964 uint32_t uExitInfo2 = uErr;
3965 switch (enmTaskSwitch)
3966 {
3967 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3968 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3969 default: break;
3970 }
3971 if (fFlags & IEM_XCPT_FLAGS_ERR)
3972 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3973 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3974 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3975
3976 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3977 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3978 RT_NOREF2(uExitInfo1, uExitInfo2);
3979 }
3980
3981 /*
3982 * Check the current TSS limit. The last written byte to the current TSS during the
3983 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3984 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3985 *
3986 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3987 * end up with smaller than "legal" TSS limits.
3988 */
3989 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
3990 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3991 if (uCurTSSLimit < uCurTSSLimitMin)
3992 {
3993 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3994 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3995 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3996 }
3997
3998 /*
3999 * Verify that the new TSS can be accessed and map it. Map only the required contents
4000 * and not the entire TSS.
4001 */
4002 void *pvNewTSS;
4003 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
4004 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4005 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4006 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4007 * not perform correct translation if this happens. See Intel spec. 7.2.1
4008 * "Task-State Segment". */
4009 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4010 if (rcStrict != VINF_SUCCESS)
4011 {
4012 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4013 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4014 return rcStrict;
4015 }
4016
4017 /*
4018 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4019 */
4020 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4021 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4022 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4023 {
4024 PX86DESC pDescCurTSS;
4025 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4026 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4027 if (rcStrict != VINF_SUCCESS)
4028 {
4029 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4030 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4031 return rcStrict;
4032 }
4033
4034 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4035 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4036 if (rcStrict != VINF_SUCCESS)
4037 {
4038 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4039 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4040 return rcStrict;
4041 }
4042
4043 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4044 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4045 {
4046 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4047 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4048 u32EFlags &= ~X86_EFL_NT;
4049 }
4050 }
4051
4052 /*
4053 * Save the CPU state into the current TSS.
4054 */
4055 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4056 if (GCPtrNewTSS == GCPtrCurTSS)
4057 {
4058 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4059 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4060 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4061 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4062 pVCpu->cpum.GstCtx.ldtr.Sel));
4063 }
4064 if (fIsNewTSS386)
4065 {
4066 /*
4067 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4068 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4069 */
4070 void *pvCurTSS32;
4071 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4072 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4073 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4074 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4075 if (rcStrict != VINF_SUCCESS)
4076 {
4077 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4078 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4079 return rcStrict;
4080 }
4081
4082 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4083 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4084 pCurTSS32->eip = uNextEip;
4085 pCurTSS32->eflags = u32EFlags;
4086 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4087 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4088 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4089 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4090 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4091 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4092 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4093 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4094 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4095 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4096 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4097 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4098 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4099 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4100
4101 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4102 if (rcStrict != VINF_SUCCESS)
4103 {
4104 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4105 VBOXSTRICTRC_VAL(rcStrict)));
4106 return rcStrict;
4107 }
4108 }
4109 else
4110 {
4111 /*
4112 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4113 */
4114 void *pvCurTSS16;
4115 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4116 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4117 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4118 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4119 if (rcStrict != VINF_SUCCESS)
4120 {
4121 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4122 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4123 return rcStrict;
4124 }
4125
4126 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4127 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4128 pCurTSS16->ip = uNextEip;
4129 pCurTSS16->flags = u32EFlags;
4130 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4131 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4132 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4133 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4134 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4135 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4136 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4137 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4138 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4139 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4140 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4141 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4142
4143 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4144 if (rcStrict != VINF_SUCCESS)
4145 {
4146 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4147 VBOXSTRICTRC_VAL(rcStrict)));
4148 return rcStrict;
4149 }
4150 }
4151
4152 /*
4153 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4154 */
4155 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4156 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4157 {
4158 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4159 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4160 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4161 }
4162
4163 /*
4164 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4165 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4166 */
4167 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4168 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4169 bool fNewDebugTrap;
4170 if (fIsNewTSS386)
4171 {
4172 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
4173 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4174 uNewEip = pNewTSS32->eip;
4175 uNewEflags = pNewTSS32->eflags;
4176 uNewEax = pNewTSS32->eax;
4177 uNewEcx = pNewTSS32->ecx;
4178 uNewEdx = pNewTSS32->edx;
4179 uNewEbx = pNewTSS32->ebx;
4180 uNewEsp = pNewTSS32->esp;
4181 uNewEbp = pNewTSS32->ebp;
4182 uNewEsi = pNewTSS32->esi;
4183 uNewEdi = pNewTSS32->edi;
4184 uNewES = pNewTSS32->es;
4185 uNewCS = pNewTSS32->cs;
4186 uNewSS = pNewTSS32->ss;
4187 uNewDS = pNewTSS32->ds;
4188 uNewFS = pNewTSS32->fs;
4189 uNewGS = pNewTSS32->gs;
4190 uNewLdt = pNewTSS32->selLdt;
4191 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4192 }
4193 else
4194 {
4195 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
4196 uNewCr3 = 0;
4197 uNewEip = pNewTSS16->ip;
4198 uNewEflags = pNewTSS16->flags;
4199 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4200 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4201 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4202 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4203 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4204 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4205 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4206 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4207 uNewES = pNewTSS16->es;
4208 uNewCS = pNewTSS16->cs;
4209 uNewSS = pNewTSS16->ss;
4210 uNewDS = pNewTSS16->ds;
4211 uNewFS = 0;
4212 uNewGS = 0;
4213 uNewLdt = pNewTSS16->selLdt;
4214 fNewDebugTrap = false;
4215 }
4216
4217 if (GCPtrNewTSS == GCPtrCurTSS)
4218 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4219 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4220
4221 /*
4222 * We're done accessing the new TSS.
4223 */
4224 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4225 if (rcStrict != VINF_SUCCESS)
4226 {
4227 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4228 return rcStrict;
4229 }
4230
4231 /*
4232 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4233 */
4234 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4235 {
4236 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4237 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4238 if (rcStrict != VINF_SUCCESS)
4239 {
4240 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4241 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4242 return rcStrict;
4243 }
4244
4245 /* Check that the descriptor indicates the new TSS is available (not busy). */
4246 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4247 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4248 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4249
4250 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4251 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4252 if (rcStrict != VINF_SUCCESS)
4253 {
4254 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4255 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4256 return rcStrict;
4257 }
4258 }
4259
4260 /*
4261 * From this point on, we're technically in the new task. We will defer exceptions
4262 * until the completion of the task switch but before executing any instructions in the new task.
4263 */
4264 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4265 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4266 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4267 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4268 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4269 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4270 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4271
4272 /* Set the busy bit in TR. */
4273 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4274
4275 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4276 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4277 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4278 {
4279 uNewEflags |= X86_EFL_NT;
4280 }
4281
4282 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4283 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4284 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4285
4286 pVCpu->cpum.GstCtx.eip = uNewEip;
4287 pVCpu->cpum.GstCtx.eax = uNewEax;
4288 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4289 pVCpu->cpum.GstCtx.edx = uNewEdx;
4290 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4291 pVCpu->cpum.GstCtx.esp = uNewEsp;
4292 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4293 pVCpu->cpum.GstCtx.esi = uNewEsi;
4294 pVCpu->cpum.GstCtx.edi = uNewEdi;
4295
4296 uNewEflags &= X86_EFL_LIVE_MASK;
4297 uNewEflags |= X86_EFL_RA1_MASK;
4298 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4299
4300 /*
4301 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4302 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4303 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4304 */
4305 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4306 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4307
4308 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4309 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4310
4311 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4312 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4313
4314 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4315 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4316
4317 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4318 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4319
4320 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4321 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4322 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4323
4324 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4325 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4326 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4327 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4328
4329 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4330 {
4331 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4332 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4333 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4334 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4335 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4336 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4337 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4338 }
4339
4340 /*
4341 * Switch CR3 for the new task.
4342 */
4343 if ( fIsNewTSS386
4344 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4345 {
4346 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4347 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4348 AssertRCSuccessReturn(rc, rc);
4349
4350 /* Inform PGM. */
4351 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4352 AssertRCReturn(rc, rc);
4353 /* ignore informational status codes */
4354
4355 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4356 }
4357
4358 /*
4359 * Switch LDTR for the new task.
4360 */
4361 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4362 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4363 else
4364 {
4365 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4366
4367 IEMSELDESC DescNewLdt;
4368 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4369 if (rcStrict != VINF_SUCCESS)
4370 {
4371 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4372 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4373 return rcStrict;
4374 }
4375 if ( !DescNewLdt.Legacy.Gen.u1Present
4376 || DescNewLdt.Legacy.Gen.u1DescType
4377 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4378 {
4379 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4380 uNewLdt, DescNewLdt.Legacy.u));
4381 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4382 }
4383
4384 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4385 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4386 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4387 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4388 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4389 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4390 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4392 }
4393
4394 IEMSELDESC DescSS;
4395 if (IEM_IS_V86_MODE(pVCpu))
4396 {
4397 pVCpu->iem.s.uCpl = 3;
4398 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4399 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4400 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4401 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4402 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4403 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4404
4405 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
4406 DescSS.Legacy.u = 0;
4407 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4408 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4409 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4410 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4411 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4412 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4413 DescSS.Legacy.Gen.u2Dpl = 3;
4414 }
4415 else
4416 {
4417 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
4418
4419 /*
4420 * Load the stack segment for the new task.
4421 */
4422 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4423 {
4424 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4425 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4426 }
4427
4428 /* Fetch the descriptor. */
4429 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4430 if (rcStrict != VINF_SUCCESS)
4431 {
4432 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4433 VBOXSTRICTRC_VAL(rcStrict)));
4434 return rcStrict;
4435 }
4436
4437 /* SS must be a data segment and writable. */
4438 if ( !DescSS.Legacy.Gen.u1DescType
4439 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4440 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4441 {
4442 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4443 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4444 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4445 }
4446
4447 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4448 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4449 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4450 {
4451 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4452 uNewCpl));
4453 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4454 }
4455
4456 /* Is it there? */
4457 if (!DescSS.Legacy.Gen.u1Present)
4458 {
4459 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4460 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4461 }
4462
4463 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4464 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4465
4466 /* Set the accessed bit before committing the result into SS. */
4467 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4468 {
4469 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4470 if (rcStrict != VINF_SUCCESS)
4471 return rcStrict;
4472 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4473 }
4474
4475 /* Commit SS. */
4476 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4477 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4478 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4479 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4480 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4481 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4482 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4483
4484 /* CPL has changed, update IEM before loading rest of segments. */
4485 pVCpu->iem.s.uCpl = uNewCpl;
4486
4487 /*
4488 * Load the data segments for the new task.
4489 */
4490 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4491 if (rcStrict != VINF_SUCCESS)
4492 return rcStrict;
4493 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4494 if (rcStrict != VINF_SUCCESS)
4495 return rcStrict;
4496 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4497 if (rcStrict != VINF_SUCCESS)
4498 return rcStrict;
4499 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4500 if (rcStrict != VINF_SUCCESS)
4501 return rcStrict;
4502
4503 /*
4504 * Load the code segment for the new task.
4505 */
4506 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4507 {
4508 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4509 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4510 }
4511
4512 /* Fetch the descriptor. */
4513 IEMSELDESC DescCS;
4514 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4515 if (rcStrict != VINF_SUCCESS)
4516 {
4517 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4518 return rcStrict;
4519 }
4520
4521 /* CS must be a code segment. */
4522 if ( !DescCS.Legacy.Gen.u1DescType
4523 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4524 {
4525 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4526 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4527 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4528 }
4529
4530 /* For conforming CS, DPL must be less than or equal to the RPL. */
4531 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4532 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4533 {
4534 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4535 DescCS.Legacy.Gen.u2Dpl));
4536 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4537 }
4538
4539 /* For non-conforming CS, DPL must match RPL. */
4540 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4541 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4542 {
4543 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4544 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4545 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4546 }
4547
4548 /* Is it there? */
4549 if (!DescCS.Legacy.Gen.u1Present)
4550 {
4551 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4552 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4553 }
4554
4555 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4556 u64Base = X86DESC_BASE(&DescCS.Legacy);
4557
4558 /* Set the accessed bit before committing the result into CS. */
4559 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4560 {
4561 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4562 if (rcStrict != VINF_SUCCESS)
4563 return rcStrict;
4564 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4565 }
4566
4567 /* Commit CS. */
4568 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4569 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4570 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4571 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4572 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4573 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4574 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4575 }
4576
4577 /** @todo Debug trap. */
4578 if (fIsNewTSS386 && fNewDebugTrap)
4579 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4580
4581 /*
4582 * Construct the error code masks based on what caused this task switch.
4583 * See Intel Instruction reference for INT.
4584 */
4585 uint16_t uExt;
4586 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4587 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4588 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4589 {
4590 uExt = 1;
4591 }
4592 else
4593 uExt = 0;
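 /* Note: uExt is 1 when the switch is caused by a hardware interrupt, an exception or an
    ICEBP (INT1); a software INT n (and non-interrupt task switches) leaves it clear. */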
4594
4595 /*
4596 * Push any error code on to the new stack.
4597 */
4598 if (fFlags & IEM_XCPT_FLAGS_ERR)
4599 {
4600 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4601 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4602 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4603
4604 /* Check that there is sufficient space on the stack. */
4605 /** @todo Factor out segment limit checking for normal/expand down segments
4606 * into a separate function. */
4607 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4608 {
4609 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4610 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4611 {
4612 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4613 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4614 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4615 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4616 }
4617 }
4618 else
4619 {
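 /* Expand-down stack segment: valid offsets lie above the limit, up to 64KiB or 4GiB
    depending on the D/B bit. */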
4620 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4621 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4622 {
4623 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4624 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4625 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4626 }
4627 }
4628
4629
4630 if (fIsNewTSS386)
4631 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4632 else
4633 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4634 if (rcStrict != VINF_SUCCESS)
4635 {
4636 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4637 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4638 return rcStrict;
4639 }
4640 }
4641
4642 /* Check the new EIP against the new CS limit. */
4643 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4644 {
 4645 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4646 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4647 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4648 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4649 }
4650
4651 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4652 pVCpu->cpum.GstCtx.ss.Sel));
4653 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4654}
4655
4656
4657/**
4658 * Implements exceptions and interrupts for protected mode.
4659 *
4660 * @returns VBox strict status code.
4661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4662 * @param cbInstr The number of bytes to offset rIP by in the return
4663 * address.
4664 * @param u8Vector The interrupt / exception vector number.
4665 * @param fFlags The flags.
4666 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4667 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4668 */
4669IEM_STATIC VBOXSTRICTRC
4670iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
4671 uint8_t cbInstr,
4672 uint8_t u8Vector,
4673 uint32_t fFlags,
4674 uint16_t uErr,
4675 uint64_t uCr2)
4676{
4677 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4678
4679 /*
4680 * Read the IDT entry.
4681 */
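 /* Protected-mode IDT entries are 8 bytes each; the IDT limit must cover bytes
    vector*8 thru vector*8+7. */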
4682 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4683 {
4684 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4685 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4686 }
4687 X86DESC Idte;
4688 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4689 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4690 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4691 {
4692 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4693 return rcStrict;
4694 }
4695 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4696 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4697 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4698
4699 /*
4700 * Check the descriptor type, DPL and such.
4701 * ASSUMES this is done in the same order as described for call-gate calls.
4702 */
4703 if (Idte.Gate.u1DescType)
4704 {
4705 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4706 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4707 }
4708 bool fTaskGate = false;
4709 uint8_t f32BitGate = true;
4710 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
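 /* TF, NT, RF and VM are always cleared when entering the handler; interrupt gates
    additionally clear IF (added to the mask below). */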
4711 switch (Idte.Gate.u4Type)
4712 {
4713 case X86_SEL_TYPE_SYS_UNDEFINED:
4714 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4715 case X86_SEL_TYPE_SYS_LDT:
4716 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4717 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4718 case X86_SEL_TYPE_SYS_UNDEFINED2:
4719 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4720 case X86_SEL_TYPE_SYS_UNDEFINED3:
4721 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4722 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4723 case X86_SEL_TYPE_SYS_UNDEFINED4:
4724 {
4725 /** @todo check what actually happens when the type is wrong...
4726 * esp. call gates. */
4727 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4728 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4729 }
4730
4731 case X86_SEL_TYPE_SYS_286_INT_GATE:
4732 f32BitGate = false;
4733 RT_FALL_THRU();
4734 case X86_SEL_TYPE_SYS_386_INT_GATE:
4735 fEflToClear |= X86_EFL_IF;
4736 break;
4737
4738 case X86_SEL_TYPE_SYS_TASK_GATE:
4739 fTaskGate = true;
4740#ifndef IEM_IMPLEMENTS_TASKSWITCH
4741 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4742#endif
4743 break;
4744
4745 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
 4746 f32BitGate = false;
 RT_FALL_THRU();
4747 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4748 break;
4749
4750 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4751 }
4752
4753 /* Check DPL against CPL if applicable. */
4754 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4755 {
4756 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4757 {
4758 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4759 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4760 }
4761 }
4762
4763 /* Is it there? */
4764 if (!Idte.Gate.u1Present)
4765 {
4766 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4767 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4768 }
4769
4770 /* Is it a task-gate? */
4771 if (fTaskGate)
4772 {
4773 /*
4774 * Construct the error code masks based on what caused this task switch.
4775 * See Intel Instruction reference for INT.
4776 */
4777 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4778 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4779 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4780 RTSEL SelTSS = Idte.Gate.u16Sel;
4781
4782 /*
4783 * Fetch the TSS descriptor in the GDT.
4784 */
4785 IEMSELDESC DescTSS;
4786 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4787 if (rcStrict != VINF_SUCCESS)
4788 {
4789 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4790 VBOXSTRICTRC_VAL(rcStrict)));
4791 return rcStrict;
4792 }
4793
4794 /* The TSS descriptor must be a system segment and be available (not busy). */
4795 if ( DescTSS.Legacy.Gen.u1DescType
4796 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4797 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4798 {
4799 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4800 u8Vector, SelTSS, DescTSS.Legacy.au64));
4801 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4802 }
4803
4804 /* The TSS must be present. */
4805 if (!DescTSS.Legacy.Gen.u1Present)
4806 {
4807 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4808 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4809 }
4810
4811 /* Do the actual task switch. */
4812 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4813 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4814 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4815 }
4816
4817 /* A null CS is bad. */
4818 RTSEL NewCS = Idte.Gate.u16Sel;
4819 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4820 {
4821 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4822 return iemRaiseGeneralProtectionFault0(pVCpu);
4823 }
4824
4825 /* Fetch the descriptor for the new CS. */
4826 IEMSELDESC DescCS;
4827 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4828 if (rcStrict != VINF_SUCCESS)
4829 {
4830 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4831 return rcStrict;
4832 }
4833
4834 /* Must be a code segment. */
4835 if (!DescCS.Legacy.Gen.u1DescType)
4836 {
4837 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4838 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4839 }
4840 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4841 {
4842 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4843 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4844 }
4845
4846 /* Don't allow lowering the privilege level. */
4847 /** @todo Does the lowering of privileges apply to software interrupts
 4848 * only? This has a bearing on the more-privileged or
4849 * same-privilege stack behavior further down. A testcase would
4850 * be nice. */
4851 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4852 {
4853 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4854 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4855 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4856 }
4857
4858 /* Make sure the selector is present. */
4859 if (!DescCS.Legacy.Gen.u1Present)
4860 {
4861 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4862 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4863 }
4864
4865 /* Check the new EIP against the new CS limit. */
4866 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4867 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4868 ? Idte.Gate.u16OffsetLow
4869 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4870 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4871 if (uNewEip > cbLimitCS)
4872 {
4873 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4874 u8Vector, uNewEip, cbLimitCS, NewCS));
4875 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4876 }
4877 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4878
4879 /* Calc the flag image to push. */
4880 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4881 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4882 fEfl &= ~X86_EFL_RF;
4883 else
4884 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4885
4886 /* From V8086 mode only go to CPL 0. */
4887 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4888 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4889 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4890 {
4891 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4892 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4893 }
4894
4895 /*
4896 * If the privilege level changes, we need to get a new stack from the TSS.
4897 * This in turns means validating the new SS and ESP...
4898 */
4899 if (uNewCpl != pVCpu->iem.s.uCpl)
4900 {
4901 RTSEL NewSS;
4902 uint32_t uNewEsp;
4903 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4904 if (rcStrict != VINF_SUCCESS)
4905 return rcStrict;
4906
4907 IEMSELDESC DescSS;
4908 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4909 if (rcStrict != VINF_SUCCESS)
4910 return rcStrict;
4911 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4912 if (!DescSS.Legacy.Gen.u1DefBig)
4913 {
4914 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4915 uNewEsp = (uint16_t)uNewEsp;
4916 }
4917
4918 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4919
4920 /* Check that there is sufficient space for the stack frame. */
4921 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4922 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4923 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4924 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
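 /* The frame holds EIP, CS, EFLAGS, ESP and SS (5 slots), plus ES, DS, FS and GS when
    coming from V8086 mode, plus an optional error code; 16-bit gates use 2 bytes per
    slot, 32-bit gates twice that (hence the shift by f32BitGate). */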
4925
4926 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4927 {
4928 if ( uNewEsp - 1 > cbLimitSS
4929 || uNewEsp < cbStackFrame)
4930 {
4931 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4932 u8Vector, NewSS, uNewEsp, cbStackFrame));
4933 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4934 }
4935 }
4936 else
4937 {
4938 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4939 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4940 {
4941 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4942 u8Vector, NewSS, uNewEsp, cbStackFrame));
4943 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4944 }
4945 }
4946
4947 /*
4948 * Start making changes.
4949 */
4950
4951 /* Set the new CPL so that stack accesses use it. */
4952 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4953 pVCpu->iem.s.uCpl = uNewCpl;
4954
4955 /* Create the stack frame. */
4956 RTPTRUNION uStackFrame;
4957 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4958 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4959 if (rcStrict != VINF_SUCCESS)
4960 return rcStrict;
4961 void * const pvStackFrame = uStackFrame.pv;
4962 if (f32BitGate)
4963 {
4964 if (fFlags & IEM_XCPT_FLAGS_ERR)
4965 *uStackFrame.pu32++ = uErr;
4966 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4967 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4968 uStackFrame.pu32[2] = fEfl;
4969 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4970 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4971 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4972 if (fEfl & X86_EFL_VM)
4973 {
4974 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4975 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4976 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4977 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4978 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4979 }
4980 }
4981 else
4982 {
4983 if (fFlags & IEM_XCPT_FLAGS_ERR)
4984 *uStackFrame.pu16++ = uErr;
4985 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4986 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4987 uStackFrame.pu16[2] = fEfl;
4988 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4989 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4990 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4991 if (fEfl & X86_EFL_VM)
4992 {
4993 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
4994 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
4995 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
4996 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
4997 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
4998 }
4999 }
5000 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5001 if (rcStrict != VINF_SUCCESS)
5002 return rcStrict;
5003
5004 /* Mark the selectors 'accessed' (hope this is the correct time). */
 5005 * @todo testcase: exactly _when_ are the accessed bits set - before or
5006 * after pushing the stack frame? (Write protect the gdt + stack to
5007 * find out.) */
5008 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5009 {
5010 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5011 if (rcStrict != VINF_SUCCESS)
5012 return rcStrict;
5013 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5014 }
5015
5016 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5017 {
5018 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5019 if (rcStrict != VINF_SUCCESS)
5020 return rcStrict;
5021 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5022 }
5023
5024 /*
 5025 * Start committing the register changes (joins with the DPL=CPL branch).
5026 */
5027 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5028 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5029 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5030 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5031 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5032 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5033 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5034 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5035 * SP is loaded).
5036 * Need to check the other combinations too:
5037 * - 16-bit TSS, 32-bit handler
5038 * - 32-bit TSS, 16-bit handler */
5039 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5040 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5041 else
5042 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5043
5044 if (fEfl & X86_EFL_VM)
5045 {
5046 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5047 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5048 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5049 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5050 }
5051 }
5052 /*
5053 * Same privilege, no stack change and smaller stack frame.
5054 */
5055 else
5056 {
5057 uint64_t uNewRsp;
5058 RTPTRUNION uStackFrame;
5059 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
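 /* Same-privilege frame: EIP, CS and EFLAGS (3 slots), plus an optional error code;
    slot size again depends on the gate size. */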
5060 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5061 if (rcStrict != VINF_SUCCESS)
5062 return rcStrict;
5063 void * const pvStackFrame = uStackFrame.pv;
5064
5065 if (f32BitGate)
5066 {
5067 if (fFlags & IEM_XCPT_FLAGS_ERR)
5068 *uStackFrame.pu32++ = uErr;
5069 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5070 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5071 uStackFrame.pu32[2] = fEfl;
5072 }
5073 else
5074 {
5075 if (fFlags & IEM_XCPT_FLAGS_ERR)
5076 *uStackFrame.pu16++ = uErr;
5077 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5078 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5079 uStackFrame.pu16[2] = fEfl;
5080 }
5081 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5082 if (rcStrict != VINF_SUCCESS)
5083 return rcStrict;
5084
5085 /* Mark the CS selector as 'accessed'. */
5086 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5087 {
5088 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5089 if (rcStrict != VINF_SUCCESS)
5090 return rcStrict;
5091 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5092 }
5093
5094 /*
5095 * Start committing the register changes (joins with the other branch).
5096 */
5097 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5098 }
5099
5100 /* ... register committing continues. */
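 /* Note that the RPL of the committed CS selector is forced to the new CPL. */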
5101 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5102 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5103 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5104 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5105 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5106 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5107
5108 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5109 fEfl &= ~fEflToClear;
5110 IEMMISC_SET_EFL(pVCpu, fEfl);
5111
5112 if (fFlags & IEM_XCPT_FLAGS_CR2)
5113 pVCpu->cpum.GstCtx.cr2 = uCr2;
5114
5115 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5116 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5117
5118 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5119}
5120
5121
5122/**
5123 * Implements exceptions and interrupts for long mode.
5124 *
5125 * @returns VBox strict status code.
5126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5127 * @param cbInstr The number of bytes to offset rIP by in the return
5128 * address.
5129 * @param u8Vector The interrupt / exception vector number.
5130 * @param fFlags The flags.
5131 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5132 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5133 */
5134IEM_STATIC VBOXSTRICTRC
5135iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5136 uint8_t cbInstr,
5137 uint8_t u8Vector,
5138 uint32_t fFlags,
5139 uint16_t uErr,
5140 uint64_t uCr2)
5141{
5142 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5143
5144 /*
5145 * Read the IDT entry.
5146 */
5147 uint16_t offIdt = (uint16_t)u8Vector << 4;
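 /* Long-mode IDT entries are 16 bytes each, hence the shift by 4. */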
5148 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5149 {
5150 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5151 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5152 }
5153 X86DESC64 Idte;
5154#ifdef _MSC_VER /* Shut up silly compiler warning. */
5155 Idte.au64[0] = 0;
5156 Idte.au64[1] = 0;
5157#endif
5158 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5159 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5160 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5161 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5162 {
5163 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5164 return rcStrict;
5165 }
5166 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5167 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5168 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5169
5170 /*
5171 * Check the descriptor type, DPL and such.
5172 * ASSUMES this is done in the same order as described for call-gate calls.
5173 */
5174 if (Idte.Gate.u1DescType)
5175 {
5176 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5177 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5178 }
5179 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5180 switch (Idte.Gate.u4Type)
5181 {
5182 case AMD64_SEL_TYPE_SYS_INT_GATE:
5183 fEflToClear |= X86_EFL_IF;
5184 break;
5185 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5186 break;
5187
5188 default:
5189 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5190 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5191 }
5192
5193 /* Check DPL against CPL if applicable. */
5194 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5195 {
5196 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5197 {
5198 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5199 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5200 }
5201 }
5202
5203 /* Is it there? */
5204 if (!Idte.Gate.u1Present)
5205 {
5206 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5207 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5208 }
5209
5210 /* A null CS is bad. */
5211 RTSEL NewCS = Idte.Gate.u16Sel;
5212 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5213 {
5214 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5215 return iemRaiseGeneralProtectionFault0(pVCpu);
5216 }
5217
5218 /* Fetch the descriptor for the new CS. */
5219 IEMSELDESC DescCS;
5220 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5221 if (rcStrict != VINF_SUCCESS)
5222 {
5223 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5224 return rcStrict;
5225 }
5226
5227 /* Must be a 64-bit code segment. */
5228 if (!DescCS.Long.Gen.u1DescType)
5229 {
5230 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5231 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5232 }
5233 if ( !DescCS.Long.Gen.u1Long
5234 || DescCS.Long.Gen.u1DefBig
5235 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5236 {
5237 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5238 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5239 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5240 }
5241
5242 /* Don't allow lowering the privilege level. For non-conforming CS
5243 selectors, the CS.DPL sets the privilege level the trap/interrupt
5244 handler runs at. For conforming CS selectors, the CPL remains
5245 unchanged, but the CS.DPL must be <= CPL. */
5246 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
 5247 * when the CPU is in Ring-0. Result \#GP? */
5248 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5249 {
5250 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5251 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5252 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5253 }
5254
5255
5256 /* Make sure the selector is present. */
5257 if (!DescCS.Legacy.Gen.u1Present)
5258 {
5259 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5260 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5261 }
5262
5263 /* Check that the new RIP is canonical. */
5264 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5265 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5266 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5267 if (!IEM_IS_CANONICAL(uNewRip))
5268 {
5269 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5270 return iemRaiseGeneralProtectionFault0(pVCpu);
5271 }
5272
5273 /*
5274 * If the privilege level changes or if the IST isn't zero, we need to get
5275 * a new stack from the TSS.
5276 */
5277 uint64_t uNewRsp;
5278 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5279 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5280 if ( uNewCpl != pVCpu->iem.s.uCpl
5281 || Idte.Gate.u3IST != 0)
5282 {
5283 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5284 if (rcStrict != VINF_SUCCESS)
5285 return rcStrict;
5286 }
5287 else
5288 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5289 uNewRsp &= ~(uint64_t)0xf;
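 /* In 64-bit mode the CPU aligns the stack down to a 16-byte boundary before pushing the frame. */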
5290
5291 /*
5292 * Calc the flag image to push.
5293 */
5294 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5295 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5296 fEfl &= ~X86_EFL_RF;
5297 else
5298 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5299
5300 /*
5301 * Start making changes.
5302 */
5303 /* Set the new CPL so that stack accesses use it. */
5304 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5305 pVCpu->iem.s.uCpl = uNewCpl;
5306
5307 /* Create the stack frame. */
5308 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
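 /* The frame holds RIP, CS, RFLAGS, RSP and SS (5 qwords), plus an error code qword when applicable. */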
5309 RTPTRUNION uStackFrame;
5310 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5311 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5312 if (rcStrict != VINF_SUCCESS)
5313 return rcStrict;
5314 void * const pvStackFrame = uStackFrame.pv;
5315
5316 if (fFlags & IEM_XCPT_FLAGS_ERR)
5317 *uStackFrame.pu64++ = uErr;
5318 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5319 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5320 uStackFrame.pu64[2] = fEfl;
5321 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5322 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5323 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5324 if (rcStrict != VINF_SUCCESS)
5325 return rcStrict;
5326
 5327 /* Mark the CS selector 'accessed' (hope this is the correct time). */
 5328 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5329 * after pushing the stack frame? (Write protect the gdt + stack to
5330 * find out.) */
5331 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5332 {
5333 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5334 if (rcStrict != VINF_SUCCESS)
5335 return rcStrict;
5336 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5337 }
5338
5339 /*
 5340 * Start committing the register changes.
5341 */
5342 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5343 * hidden registers when interrupting 32-bit or 16-bit code! */
5344 if (uNewCpl != uOldCpl)
5345 {
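 /* On a privilege change in long mode, SS is loaded with a NULL selector whose RPL is the new CPL. */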
5346 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5347 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5348 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5349 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5350 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5351 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5352 }
5353 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5354 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5355 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5356 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5357 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5358 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5359 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5360 pVCpu->cpum.GstCtx.rip = uNewRip;
5361
5362 fEfl &= ~fEflToClear;
5363 IEMMISC_SET_EFL(pVCpu, fEfl);
5364
5365 if (fFlags & IEM_XCPT_FLAGS_CR2)
5366 pVCpu->cpum.GstCtx.cr2 = uCr2;
5367
5368 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5369 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5370
5371 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5372}
5373
5374
5375/**
5376 * Implements exceptions and interrupts.
5377 *
 5378 * All exceptions and interrupts go through this function!
5379 *
5380 * @returns VBox strict status code.
5381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5382 * @param cbInstr The number of bytes to offset rIP by in the return
5383 * address.
5384 * @param u8Vector The interrupt / exception vector number.
5385 * @param fFlags The flags.
5386 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5387 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5388 */
5389DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5390iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5391 uint8_t cbInstr,
5392 uint8_t u8Vector,
5393 uint32_t fFlags,
5394 uint16_t uErr,
5395 uint64_t uCr2)
5396{
5397 /*
5398 * Get all the state that we might need here.
5399 */
5400 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5401 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5402
5403#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5404 /*
5405 * Flush prefetch buffer
5406 */
5407 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5408#endif
5409
5410 /*
5411 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5412 */
5413 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5414 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5415 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5416 | IEM_XCPT_FLAGS_BP_INSTR
5417 | IEM_XCPT_FLAGS_ICEBP_INSTR
5418 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5419 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5420 {
5421 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5422 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5423 u8Vector = X86_XCPT_GP;
5424 uErr = 0;
5425 }
5426#ifdef DBGFTRACE_ENABLED
5427 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5428 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5429 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5430#endif
5431
5432 /*
5433 * Evaluate whether NMI blocking should be in effect.
5434 * Normally, NMI blocking is in effect whenever we inject an NMI.
5435 */
5436 bool fBlockNmi;
5437 if ( u8Vector == X86_XCPT_NMI
5438 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5439 fBlockNmi = true;
5440 else
5441 fBlockNmi = false;
5442
5443#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5444 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5445 {
5446 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5447 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5448 return rcStrict0;
5449
5450 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5451 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5452 {
5453 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5454 fBlockNmi = false;
5455 }
5456 }
5457#endif
5458
5459#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5460 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5461 {
5462 /*
5463 * If the event is being injected as part of VMRUN, it isn't subject to event
5464 * intercepts in the nested-guest. However, secondary exceptions that occur
5465 * during injection of any event -are- subject to exception intercepts.
5466 *
5467 * See AMD spec. 15.20 "Event Injection".
5468 */
5469 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5470 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5471 else
5472 {
5473 /*
5474 * Check and handle if the event being raised is intercepted.
5475 */
5476 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5477 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5478 return rcStrict0;
5479 }
5480 }
5481#endif
5482
5483 /*
5484 * Set NMI blocking if necessary.
5485 */
5486 if ( fBlockNmi
5487 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5488 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5489
5490 /*
5491 * Do recursion accounting.
5492 */
5493 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5494 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5495 if (pVCpu->iem.s.cXcptRecursions == 0)
5496 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5497 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5498 else
5499 {
5500 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5501 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5502 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5503
5504 if (pVCpu->iem.s.cXcptRecursions >= 4)
5505 {
5506#ifdef DEBUG_bird
5507 AssertFailed();
5508#endif
5509 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5510 }
5511
5512 /*
5513 * Evaluate the sequence of recurring events.
5514 */
5515 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5516 NULL /* pXcptRaiseInfo */);
5517 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5518 { /* likely */ }
5519 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5520 {
5521 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5522 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5523 u8Vector = X86_XCPT_DF;
5524 uErr = 0;
5525#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5526 /* VMX nested-guest #DF intercept needs to be checked here. */
5527 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5528 {
5529 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5530 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5531 return rcStrict0;
5532 }
5533#endif
5534 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5535 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5536 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5537 }
5538 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5539 {
5540 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5541 return iemInitiateCpuShutdown(pVCpu);
5542 }
5543 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5544 {
5545 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5546 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5547 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5548 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5549 return VERR_EM_GUEST_CPU_HANG;
5550 }
5551 else
5552 {
5553 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5554 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5555 return VERR_IEM_IPE_9;
5556 }
5557
5558 /*
 5559 * The 'EXT' bit is set when an exception occurs during delivery of an external
 5560 * event (such as an interrupt or an earlier exception)[1]. The privileged software
 5561 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
 5562 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5563 *
5564 * [1] - Intel spec. 6.13 "Error Code"
5565 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5566 * [3] - Intel Instruction reference for INT n.
5567 */
5568 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5569 && (fFlags & IEM_XCPT_FLAGS_ERR)
5570 && u8Vector != X86_XCPT_PF
5571 && u8Vector != X86_XCPT_DF)
5572 {
5573 uErr |= X86_TRAP_ERR_EXTERNAL;
5574 }
5575 }
5576
5577 pVCpu->iem.s.cXcptRecursions++;
5578 pVCpu->iem.s.uCurXcpt = u8Vector;
5579 pVCpu->iem.s.fCurXcpt = fFlags;
5580 pVCpu->iem.s.uCurXcptErr = uErr;
5581 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5582
5583 /*
5584 * Extensive logging.
5585 */
5586#if defined(LOG_ENABLED) && defined(IN_RING3)
5587 if (LogIs3Enabled())
5588 {
5589 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5590 PVM pVM = pVCpu->CTX_SUFF(pVM);
5591 char szRegs[4096];
5592 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5593 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5594 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5595 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5596 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5597 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5598 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5599 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5600 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5601 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5602 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5603 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5604 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5605 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5606 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5607 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5608 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5609 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5610 " efer=%016VR{efer}\n"
5611 " pat=%016VR{pat}\n"
5612 " sf_mask=%016VR{sf_mask}\n"
5613 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5614 " lstar=%016VR{lstar}\n"
5615 " star=%016VR{star} cstar=%016VR{cstar}\n"
5616 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5617 );
5618
5619 char szInstr[256];
5620 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5621 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5622 szInstr, sizeof(szInstr), NULL);
5623 Log3(("%s%s\n", szRegs, szInstr));
5624 }
5625#endif /* LOG_ENABLED */
5626
5627 /*
5628 * Call the mode specific worker function.
5629 */
5630 VBOXSTRICTRC rcStrict;
5631 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5632 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5633 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5634 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5635 else
5636 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5637
5638 /* Flush the prefetch buffer. */
5639#ifdef IEM_WITH_CODE_TLB
5640 pVCpu->iem.s.pbInstrBuf = NULL;
5641#else
5642 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5643#endif
5644
5645 /*
5646 * Unwind.
5647 */
5648 pVCpu->iem.s.cXcptRecursions--;
5649 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5650 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5651 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5652 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5653 pVCpu->iem.s.cXcptRecursions + 1));
5654 return rcStrict;
5655}
5656
5657#ifdef IEM_WITH_SETJMP
5658/**
5659 * See iemRaiseXcptOrInt. Will not return.
5660 */
5661IEM_STATIC DECL_NO_RETURN(void)
5662iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
5663 uint8_t cbInstr,
5664 uint8_t u8Vector,
5665 uint32_t fFlags,
5666 uint16_t uErr,
5667 uint64_t uCr2)
5668{
5669 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
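 /* The strict status code is handed back to the setjmp site as the longjmp value. */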
5670 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5671}
5672#endif
5673
5674
5675/** \#DE - 00. */
5676DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
5677{
5678 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5679}
5680
5681
5682/** \#DB - 01.
 5683 * @note This automatically clears DR7.GD. */
5684DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
5685{
5686 /** @todo set/clear RF. */
5687 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5688 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5689}
5690
5691
5692/** \#BR - 05. */
5693DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
5694{
5695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5696}
5697
5698
5699/** \#UD - 06. */
5700DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
5701{
5702 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5703}
5704
5705
5706/** \#NM - 07. */
5707DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
5708{
5709 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5710}
5711
5712
5713/** \#TS(err) - 0a. */
5714DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5715{
5716 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5717}
5718
5719
5720/** \#TS(tr) - 0a. */
5721DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
5722{
5723 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5724 pVCpu->cpum.GstCtx.tr.Sel, 0);
5725}
5726
5727
5728/** \#TS(0) - 0a. */
5729DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
5730{
5731 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5732 0, 0);
5733}
5734
5735
5736/** \#TS(err) - 0a. */
5737DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5738{
5739 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5740 uSel & X86_SEL_MASK_OFF_RPL, 0);
5741}
5742
5743
5744/** \#NP(err) - 0b. */
5745DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5746{
5747 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5748}
5749
5750
5751/** \#NP(sel) - 0b. */
5752DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5753{
5754 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5755 uSel & ~X86_SEL_RPL, 0);
5756}
5757
5758
5759/** \#SS(seg) - 0c. */
5760DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5761{
5762 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5763 uSel & ~X86_SEL_RPL, 0);
5764}
5765
5766
5767/** \#SS(err) - 0c. */
5768DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5769{
5770 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5771}
5772
5773
5774/** \#GP(n) - 0d. */
5775DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
5776{
5777 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5778}
5779
5780
5781/** \#GP(0) - 0d. */
5782DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
5783{
5784 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5785}
5786
5787#ifdef IEM_WITH_SETJMP
5788/** \#GP(0) - 0d. */
5789DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
5790{
5791 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5792}
5793#endif
5794
5795
5796/** \#GP(sel) - 0d. */
5797DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5798{
5799 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5800 Sel & ~X86_SEL_RPL, 0);
5801}
5802
5803
5804/** \#GP(0) - 0d. */
5805DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
5806{
5807 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5808}
5809
5810
5811/** \#GP(sel) - 0d. */
5812DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5813{
5814 NOREF(iSegReg); NOREF(fAccess);
5815 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5816 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5817}
5818
5819#ifdef IEM_WITH_SETJMP
5820/** \#GP(sel) - 0d, longjmp. */
5821DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5822{
5823 NOREF(iSegReg); NOREF(fAccess);
5824 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5825 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5826}
5827#endif
5828
5829/** \#GP(sel) - 0d. */
5830DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5831{
5832 NOREF(Sel);
5833 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5834}
5835
5836#ifdef IEM_WITH_SETJMP
5837/** \#GP(sel) - 0d, longjmp. */
5838DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
5839{
5840 NOREF(Sel);
5841 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5842}
5843#endif
5844
5845
5846/** \#GP(sel) - 0d. */
5847DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5848{
5849 NOREF(iSegReg); NOREF(fAccess);
5850 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5851}
5852
5853#ifdef IEM_WITH_SETJMP
5854/** \#GP(sel) - 0d, longjmp. */
5855DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
5856 uint32_t fAccess)
5857{
5858 NOREF(iSegReg); NOREF(fAccess);
5859 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5860}
5861#endif
5862
5863
5864/** \#PF(n) - 0e. */
5865DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5866{
5867 uint16_t uErr;
5868 switch (rc)
5869 {
5870 case VERR_PAGE_NOT_PRESENT:
5871 case VERR_PAGE_TABLE_NOT_PRESENT:
5872 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5873 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5874 uErr = 0;
5875 break;
5876
5877 default:
5878 AssertMsgFailed(("%Rrc\n", rc));
5879 RT_FALL_THRU();
5880 case VERR_ACCESS_DENIED:
5881 uErr = X86_TRAP_PF_P;
5882 break;
5883
5884 /** @todo reserved */
5885 }
5886
5887 if (pVCpu->iem.s.uCpl == 3)
5888 uErr |= X86_TRAP_PF_US;
5889
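 /* The instruction-fetch (I/D) bit is only reported for code fetches when PAE paging
    with EFER.NXE is enabled. */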
5890 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5891 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5892 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5893 uErr |= X86_TRAP_PF_ID;
5894
5895#if 0 /* This is so much non-sense, really. Why was it done like that? */
5896 /* Note! RW access callers reporting a WRITE protection fault, will clear
5897 the READ flag before calling. So, read-modify-write accesses (RW)
5898 can safely be reported as READ faults. */
5899 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5900 uErr |= X86_TRAP_PF_RW;
5901#else
5902 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5903 {
5904 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5905 uErr |= X86_TRAP_PF_RW;
5906 }
5907#endif
5908
5909 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5910 uErr, GCPtrWhere);
5911}
5912
5913#ifdef IEM_WITH_SETJMP
5914/** \#PF(n) - 0e, longjmp. */
5915IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5916{
5917 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5918}
5919#endif
5920
5921
5922/** \#MF(0) - 10. */
5923DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
5924{
5925 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5926}
5927
5928
5929/** \#AC(0) - 11. */
5930DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
5931{
5932 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5933}
5934
5935
5936/**
5937 * Macro for calling iemCImplRaiseDivideError().
5938 *
5939 * This enables us to add/remove arguments and force different levels of
5940 * inlining as we wish.
5941 *
5942 * @return Strict VBox status code.
5943 */
5944#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5945IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5946{
5947 NOREF(cbInstr);
5948 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5949}
5950
5951
5952/**
5953 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5954 *
5955 * This enables us to add/remove arguments and force different levels of
5956 * inlining as we wish.
5957 *
5958 * @return Strict VBox status code.
5959 */
5960#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5961IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5962{
5963 NOREF(cbInstr);
5964 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5965}
5966
5967
5968/**
5969 * Macro for calling iemCImplRaiseInvalidOpcode().
5970 *
5971 * This enables us to add/remove arguments and force different levels of
5972 * inlining as we wish.
5973 *
5974 * @return Strict VBox status code.
5975 */
5976#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5977IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5978{
5979 NOREF(cbInstr);
5980 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5981}
5982
5983
5984/** @} */
5985
5986
5987/*
5988 *
 5989 * Helper routines.
 5990 * Helper routines.
 5991 * Helper routines.
5992 *
5993 */
5994
5995/**
5996 * Recalculates the effective operand size.
5997 *
5998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5999 */
6000IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
6001{
6002 switch (pVCpu->iem.s.enmCpuMode)
6003 {
6004 case IEMMODE_16BIT:
6005 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6006 break;
6007 case IEMMODE_32BIT:
6008 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6009 break;
6010 case IEMMODE_64BIT:
6011 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6012 {
6013 case 0:
6014 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6015 break;
6016 case IEM_OP_PRF_SIZE_OP:
6017 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6018 break;
6019 case IEM_OP_PRF_SIZE_REX_W:
6020 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6021 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6022 break;
6023 }
6024 break;
6025 default:
6026 AssertFailed();
6027 }
6028}
6029
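/*
 * Illustrative summary of the prefix handling above, i.e. the effective
 * operand size that results from each mode/prefix combination:
 *
 *   CPU mode    66h prefix   REX.W    effective operand size
 *   16-bit      no           -        16-bit
 *   16-bit      yes          -        32-bit
 *   32-bit      no           -        32-bit
 *   32-bit      yes          -        16-bit
 *   64-bit      no           no       the default (enmDefOpSize)
 *   64-bit      yes          no       16-bit
 *   64-bit      any          yes      64-bit (REX.W wins over 66h)
 */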
6030
6031/**
6032 * Sets the default operand size to 64-bit and recalculates the effective
6033 * operand size.
6034 *
6035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6036 */
6037IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6038{
6039 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6040 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6041 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6042 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6043 else
6044 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6045}
6046
6047
6048/*
6049 *
6050 * Common opcode decoders.
6051 * Common opcode decoders.
6052 * Common opcode decoders.
6053 *
6054 */
6055//#include <iprt/mem.h>
6056
6057/**
6058 * Used to add extra details about a stub case.
6059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6060 */
6061IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6062{
6063#if defined(LOG_ENABLED) && defined(IN_RING3)
6064 PVM pVM = pVCpu->CTX_SUFF(pVM);
6065 char szRegs[4096];
6066 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6067 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6068 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6069 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6070 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6071 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6072 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6073 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6074 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6075 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6076 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6077 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6078 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6079 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6080 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6081 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6082 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6083 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6084 " efer=%016VR{efer}\n"
6085 " pat=%016VR{pat}\n"
6086 " sf_mask=%016VR{sf_mask}\n"
6087 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6088 " lstar=%016VR{lstar}\n"
6089 " star=%016VR{star} cstar=%016VR{cstar}\n"
6090 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6091 );
6092
6093 char szInstr[256];
6094 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6095 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6096 szInstr, sizeof(szInstr), NULL);
6097
6098 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6099#else
6100 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6101#endif
6102}
6103
6104/**
6105 * Complains about a stub.
6106 *
6107 * Two versions of this macro are provided: one for daily use and one for use
6108 * when working on IEM.
6109 */
6110#if 0
6111# define IEMOP_BITCH_ABOUT_STUB() \
6112 do { \
6113 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6114 iemOpStubMsg2(pVCpu); \
6115 RTAssertPanic(); \
6116 } while (0)
6117#else
6118# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6119#endif
6120
6121/** Stubs an opcode. */
6122#define FNIEMOP_STUB(a_Name) \
6123 FNIEMOP_DEF(a_Name) \
6124 { \
6125 RT_NOREF_PV(pVCpu); \
6126 IEMOP_BITCH_ABOUT_STUB(); \
6127 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6128 } \
6129 typedef int ignore_semicolon
6130
6131/** Stubs an opcode. */
6132#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6133 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6134 { \
6135 RT_NOREF_PV(pVCpu); \
6136 RT_NOREF_PV(a_Name0); \
6137 IEMOP_BITCH_ABOUT_STUB(); \
6138 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6139 } \
6140 typedef int ignore_semicolon
6141
6142/** Stubs an opcode which currently should raise \#UD. */
6143#define FNIEMOP_UD_STUB(a_Name) \
6144 FNIEMOP_DEF(a_Name) \
6145 { \
6146 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6147 return IEMOP_RAISE_INVALID_OPCODE(); \
6148 } \
6149 typedef int ignore_semicolon
6150
6151/** Stubs an opcode which currently should raise \#UD. */
6152#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6153 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6154 { \
6155 RT_NOREF_PV(pVCpu); \
6156 RT_NOREF_PV(a_Name0); \
6157 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6158 return IEMOP_RAISE_INVALID_OPCODE(); \
6159 } \
6160 typedef int ignore_semicolon
6161
6162
6163
6164/** @name Register Access.
6165 * @{
6166 */
6167
6168/**
6169 * Gets a reference (pointer) to the specified hidden segment register.
6170 *
6171 * @returns Hidden register reference.
6172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6173 * @param iSegReg The segment register.
6174 */
6175IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6176{
6177 Assert(iSegReg < X86_SREG_COUNT);
6178 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6179 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6180
6181 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6182 return pSReg;
6183}
6184
6185
6186/**
6187 * Ensures that the given hidden segment register is up to date.
6188 *
6189 * @returns Hidden register reference.
6190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6191 * @param pSReg The segment register.
6192 */
6193IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6194{
6195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6196 NOREF(pVCpu);
6197 return pSReg;
6198}
6199
6200
6201/**
6202 * Gets a reference (pointer) to the specified segment register (the selector
6203 * value).
6204 *
6205 * @returns Pointer to the selector variable.
6206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6207 * @param iSegReg The segment register.
6208 */
6209DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6210{
6211 Assert(iSegReg < X86_SREG_COUNT);
6212 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6213 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6214}
6215
6216
6217/**
6218 * Fetches the selector value of a segment register.
6219 *
6220 * @returns The selector value.
6221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6222 * @param iSegReg The segment register.
6223 */
6224DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6225{
6226 Assert(iSegReg < X86_SREG_COUNT);
6227 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6228 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6229}
6230
6231
6232/**
6233 * Fetches the base address value of a segment register.
6234 *
6235 * @returns The segment base address.
6236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6237 * @param iSegReg The segment register.
6238 */
6239DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6240{
6241 Assert(iSegReg < X86_SREG_COUNT);
6242 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6243 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6244}
6245
6246
6247/**
6248 * Gets a reference (pointer) to the specified general purpose register.
6249 *
6250 * @returns Register reference.
6251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6252 * @param iReg The general purpose register.
6253 */
6254DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6255{
6256 Assert(iReg < 16);
6257 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6258}
6259
6260
6261/**
6262 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6263 *
6264 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6265 *
6266 * @returns Register reference.
6267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6268 * @param iReg The register.
6269 */
6270DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6271{
6272 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6273 {
6274 Assert(iReg < 16);
6275 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6276 }
6277 /* high 8-bit register. */
6278 Assert(iReg < 8);
6279 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6280}
6281
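/*
 * A minimal standalone sketch (not used by IEM) of the 8-bit register mapping
 * implemented above: without any REX prefix the encoded register numbers 4-7
 * select AH/CH/DH/BH, i.e. the high byte of registers 0-3, while any REX
 * prefix makes them select SPL/BPL/SIL/DIL. The helper name is hypothetical;
 * assumes <stdint.h> and <stdbool.h>.
 */
#if 0 /* example only */
static unsigned ExampleU8RegSlot(uint8_t iReg, bool fHasRexPrefix, bool *pfHighByte)
{
    if (iReg < 4 || fHasRexPrefix)
    {
        *pfHighByte = false;    /* AL, CL, DL, BL, SPL, BPL, SIL, DIL, R8B-R15B. */
        return iReg;
    }
    *pfHighByte = true;         /* AH, CH, DH, BH live in bits 15:8 of regs 0-3. */
    return iReg & 3;
}
#endif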
6282
6283/**
6284 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6285 *
6286 * @returns Register reference.
6287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6288 * @param iReg The register.
6289 */
6290DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6291{
6292 Assert(iReg < 16);
6293 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6294}
6295
6296
6297/**
6298 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6299 *
6300 * @returns Register reference.
6301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6302 * @param iReg The register.
6303 */
6304DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6305{
6306 Assert(iReg < 16);
6307 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6308}
6309
6310
6311/**
6312 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6313 *
6314 * @returns Register reference.
6315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6316 * @param iReg The register.
6317 */
6318DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6319{
6320 Assert(iReg < 16);
6321 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6322}
6323
6324
6325/**
6326 * Gets a reference (pointer) to the specified segment register's base address.
6327 *
6328 * @returns Segment register base address reference.
6329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6330 * @param iSegReg The segment selector.
6331 */
6332DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6333{
6334 Assert(iSegReg < X86_SREG_COUNT);
6335 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6336 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6337}
6338
6339
6340/**
6341 * Fetches the value of an 8-bit general purpose register.
6342 *
6343 * @returns The register value.
6344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6345 * @param iReg The register.
6346 */
6347DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6348{
6349 return *iemGRegRefU8(pVCpu, iReg);
6350}
6351
6352
6353/**
6354 * Fetches the value of a 16-bit general purpose register.
6355 *
6356 * @returns The register value.
6357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6358 * @param iReg The register.
6359 */
6360DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6361{
6362 Assert(iReg < 16);
6363 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6364}
6365
6366
6367/**
6368 * Fetches the value of a 32-bit general purpose register.
6369 *
6370 * @returns The register value.
6371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6372 * @param iReg The register.
6373 */
6374DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6375{
6376 Assert(iReg < 16);
6377 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6378}
6379
6380
6381/**
6382 * Fetches the value of a 64-bit general purpose register.
6383 *
6384 * @returns The register value.
6385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6386 * @param iReg The register.
6387 */
6388DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6389{
6390 Assert(iReg < 16);
6391 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6392}
6393
6394
6395/**
6396 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6397 *
6398 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6399 * segment limit.
6400 *
6401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6402 * @param offNextInstr The offset of the next instruction.
6403 */
6404IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6405{
6406 switch (pVCpu->iem.s.enmEffOpSize)
6407 {
6408 case IEMMODE_16BIT:
6409 {
6410 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6411 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6412 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6413 return iemRaiseGeneralProtectionFault0(pVCpu);
6414 pVCpu->cpum.GstCtx.rip = uNewIp;
6415 break;
6416 }
6417
6418 case IEMMODE_32BIT:
6419 {
6420 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6421 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6422
6423 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6424 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6425 return iemRaiseGeneralProtectionFault0(pVCpu);
6426 pVCpu->cpum.GstCtx.rip = uNewEip;
6427 break;
6428 }
6429
6430 case IEMMODE_64BIT:
6431 {
6432 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6433
6434 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6435 if (!IEM_IS_CANONICAL(uNewRip))
6436 return iemRaiseGeneralProtectionFault0(pVCpu);
6437 pVCpu->cpum.GstCtx.rip = uNewRip;
6438 break;
6439 }
6440
6441 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6442 }
6443
6444 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6445
6446#ifndef IEM_WITH_CODE_TLB
6447 /* Flush the prefetch buffer. */
6448 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6449#endif
6450
6451 return VINF_SUCCESS;
6452}
6453
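/*
 * Worked example for the 16-bit case above: with IP=0xfffe, a 2 byte JMP rel8
 * and a displacement of +5, the uint16_t arithmetic wraps the new IP to
 * 0x0005; the only architectural check needed is against CS.limit, and that
 * check does not apply in 64-bit mode where segment limits are not enforced.
 */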
6454
6455/**
6456 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6457 *
6458 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6459 * segment limit.
6460 *
6461 * @returns Strict VBox status code.
6462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6463 * @param offNextInstr The offset of the next instruction.
6464 */
6465IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
6466{
6467 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6468
6469 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6470 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6471 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6472 return iemRaiseGeneralProtectionFault0(pVCpu);
6473 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6474 pVCpu->cpum.GstCtx.rip = uNewIp;
6475 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6476
6477#ifndef IEM_WITH_CODE_TLB
6478 /* Flush the prefetch buffer. */
6479 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6480#endif
6481
6482 return VINF_SUCCESS;
6483}
6484
6485
6486/**
6487 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6488 *
6489 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6490 * segment limit.
6491 *
6492 * @returns Strict VBox status code.
6493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6494 * @param offNextInstr The offset of the next instruction.
6495 */
6496IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
6497{
6498 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6499
6500 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6501 {
6502 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6503
6504 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6505 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6506 return iemRaiseGeneralProtectionFault0(pVCpu);
6507 pVCpu->cpum.GstCtx.rip = uNewEip;
6508 }
6509 else
6510 {
6511 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6512
6513 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6514 if (!IEM_IS_CANONICAL(uNewRip))
6515 return iemRaiseGeneralProtectionFault0(pVCpu);
6516 pVCpu->cpum.GstCtx.rip = uNewRip;
6517 }
6518 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6519
6520#ifndef IEM_WITH_CODE_TLB
6521 /* Flush the prefetch buffer. */
6522 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6523#endif
6524
6525 return VINF_SUCCESS;
6526}
6527
6528
6529/**
6530 * Performs a near jump to the specified address.
6531 *
6532 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6533 * segment limit.
6534 *
6535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6536 * @param uNewRip The new RIP value.
6537 */
6538IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
6539{
6540 switch (pVCpu->iem.s.enmEffOpSize)
6541 {
6542 case IEMMODE_16BIT:
6543 {
6544 Assert(uNewRip <= UINT16_MAX);
6545 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6546 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6547 return iemRaiseGeneralProtectionFault0(pVCpu);
6548 /** @todo Test 16-bit jump in 64-bit mode. */
6549 pVCpu->cpum.GstCtx.rip = uNewRip;
6550 break;
6551 }
6552
6553 case IEMMODE_32BIT:
6554 {
6555 Assert(uNewRip <= UINT32_MAX);
6556 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6557 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6558
6559 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6560 return iemRaiseGeneralProtectionFault0(pVCpu);
6561 pVCpu->cpum.GstCtx.rip = uNewRip;
6562 break;
6563 }
6564
6565 case IEMMODE_64BIT:
6566 {
6567 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6568
6569 if (!IEM_IS_CANONICAL(uNewRip))
6570 return iemRaiseGeneralProtectionFault0(pVCpu);
6571 pVCpu->cpum.GstCtx.rip = uNewRip;
6572 break;
6573 }
6574
6575 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6576 }
6577
6578 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6579
6580#ifndef IEM_WITH_CODE_TLB
6581 /* Flush the prefetch buffer. */
6582 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6583#endif
6584
6585 return VINF_SUCCESS;
6586}
6587
6588
6589/**
6590 * Gets the address of the top of the stack.
6591 *
6592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6593 */
6594DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6595{
6596 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6597 return pVCpu->cpum.GstCtx.rsp;
6598 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6599 return pVCpu->cpum.GstCtx.esp;
6600 return pVCpu->cpum.GstCtx.sp;
6601}
6602
6603
6604/**
6605 * Updates the RIP/EIP/IP to point to the next instruction.
6606 *
6607 * This function leaves the EFLAGS.RF flag alone.
6608 *
6609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6610 * @param cbInstr The number of bytes to add.
6611 */
6612IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6613{
6614 switch (pVCpu->iem.s.enmCpuMode)
6615 {
6616 case IEMMODE_16BIT:
6617 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6618 pVCpu->cpum.GstCtx.eip += cbInstr;
6619 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6620 break;
6621
6622 case IEMMODE_32BIT:
6623 pVCpu->cpum.GstCtx.eip += cbInstr;
6624 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6625 break;
6626
6627 case IEMMODE_64BIT:
6628 pVCpu->cpum.GstCtx.rip += cbInstr;
6629 break;
6630 default: AssertFailed();
6631 }
6632}
6633
6634
6635#if 0
6636/**
6637 * Updates the RIP/EIP/IP to point to the next instruction.
6638 *
6639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6640 */
6641IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
6642{
6643 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6644}
6645#endif
6646
6647
6648
6649/**
6650 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6651 *
6652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6653 * @param cbInstr The number of bytes to add.
6654 */
6655IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6656{
6657 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6658
6659 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6660#if ARCH_BITS >= 64
6661 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6662 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6663 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6664#else
6665 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6666 pVCpu->cpum.GstCtx.rip += cbInstr;
6667 else
6668 pVCpu->cpum.GstCtx.eip += cbInstr;
6669#endif
6670}
6671
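/*
 * A minimal standalone sketch (not used by IEM) of the table-driven RIP
 * advance above, assuming <stdint.h>; iMode follows IEMMODE with 0 = 16-bit,
 * 1 = 32-bit, 2 = 64-bit, and the 16-bit entry mirrors the table above in
 * using the same mask as 32-bit. The helper name is hypothetical.
 */
#if 0 /* example only */
static uint64_t ExampleAdvanceRip(uint64_t uRip, uint8_t cbInstr, unsigned iMode)
{
    static uint64_t const s_aMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
    return (uRip + cbInstr) & s_aMasks[iMode];
}
#endif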
6672
6673/**
6674 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6675 *
6676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6677 */
6678IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
6679{
6680 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6681}
6682
6683
6684/**
6685 * Adds to the stack pointer.
6686 *
6687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6688 * @param cbToAdd The number of bytes to add (8-bit!).
6689 */
6690DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
6691{
6692 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6693 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6694 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6695 pVCpu->cpum.GstCtx.esp += cbToAdd;
6696 else
6697 pVCpu->cpum.GstCtx.sp += cbToAdd;
6698}
6699
6700
6701/**
6702 * Subtracts from the stack pointer.
6703 *
6704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6705 * @param cbToSub The number of bytes to subtract (8-bit!).
6706 */
6707DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
6708{
6709 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6710 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6711 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6712 pVCpu->cpum.GstCtx.esp -= cbToSub;
6713 else
6714 pVCpu->cpum.GstCtx.sp -= cbToSub;
6715}
6716
6717
6718/**
6719 * Adds to the temporary stack pointer.
6720 *
6721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6722 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6723 * @param cbToAdd The number of bytes to add (16-bit).
6724 */
6725DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6726{
6727 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6728 pTmpRsp->u += cbToAdd;
6729 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6730 pTmpRsp->DWords.dw0 += cbToAdd;
6731 else
6732 pTmpRsp->Words.w0 += cbToAdd;
6733}
6734
6735
6736/**
6737 * Subtracts from the temporary stack pointer.
6738 *
6739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6740 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6741 * @param cbToSub The number of bytes to subtract.
6742 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6743 * expecting that.
6744 */
6745DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6746{
6747 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6748 pTmpRsp->u -= cbToSub;
6749 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6750 pTmpRsp->DWords.dw0 -= cbToSub;
6751 else
6752 pTmpRsp->Words.w0 -= cbToSub;
6753}
6754
6755
6756/**
6757 * Calculates the effective stack address for a push of the specified size as
6758 * well as the new RSP value (upper bits may be masked).
6759 *
6760 * @returns Effective stack address for the push.
6761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6762 * @param cbItem The size of the stack item to push.
6763 * @param puNewRsp Where to return the new RSP value.
6764 */
6765DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6766{
6767 RTUINT64U uTmpRsp;
6768 RTGCPTR GCPtrTop;
6769 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6770
6771 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6772 GCPtrTop = uTmpRsp.u -= cbItem;
6773 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6774 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6775 else
6776 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6777 *puNewRsp = uTmpRsp.u;
6778 return GCPtrTop;
6779}
6780
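/*
 * Worked example: with a 16-bit stack (SS.D clear) and RSP =
 * 0x1234000000000002, pushing a 4 byte item only touches the low word of the
 * RTUINT64U, so the function returns GCPtrTop = 0xfffe and sets *puNewRsp to
 * 0x123400000000fffe, preserving the upper bits of RSP.
 */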
6781
6782/**
6783 * Gets the current stack pointer and calculates the value after a pop of the
6784 * specified size.
6785 *
6786 * @returns Current stack pointer.
6787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6788 * @param cbItem The size of the stack item to pop.
6789 * @param puNewRsp Where to return the new RSP value.
6790 */
6791DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6792{
6793 RTUINT64U uTmpRsp;
6794 RTGCPTR GCPtrTop;
6795 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6796
6797 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6798 {
6799 GCPtrTop = uTmpRsp.u;
6800 uTmpRsp.u += cbItem;
6801 }
6802 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6803 {
6804 GCPtrTop = uTmpRsp.DWords.dw0;
6805 uTmpRsp.DWords.dw0 += cbItem;
6806 }
6807 else
6808 {
6809 GCPtrTop = uTmpRsp.Words.w0;
6810 uTmpRsp.Words.w0 += cbItem;
6811 }
6812 *puNewRsp = uTmpRsp.u;
6813 return GCPtrTop;
6814}
6815
6816
6817/**
6818 * Calculates the effective stack address for a push of the specified size as
6819 * well as the new temporary RSP value (upper bits may be masked).
6820 *
6821 * @returns Effective stack address for the push.
6822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6823 * @param pTmpRsp The temporary stack pointer. This is updated.
6824 * @param cbItem The size of the stack item to push.
6825 */
6826DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6827{
6828 RTGCPTR GCPtrTop;
6829
6830 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6831 GCPtrTop = pTmpRsp->u -= cbItem;
6832 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6833 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6834 else
6835 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6836 return GCPtrTop;
6837}
6838
6839
6840/**
6841 * Gets the effective stack address for a pop of the specified size and
6842 * calculates and updates the temporary RSP.
6843 *
6844 * @returns Current stack pointer.
6845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6846 * @param pTmpRsp The temporary stack pointer. This is updated.
6847 * @param cbItem The size of the stack item to pop.
6848 */
6849DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6850{
6851 RTGCPTR GCPtrTop;
6852 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6853 {
6854 GCPtrTop = pTmpRsp->u;
6855 pTmpRsp->u += cbItem;
6856 }
6857 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6858 {
6859 GCPtrTop = pTmpRsp->DWords.dw0;
6860 pTmpRsp->DWords.dw0 += cbItem;
6861 }
6862 else
6863 {
6864 GCPtrTop = pTmpRsp->Words.w0;
6865 pTmpRsp->Words.w0 += cbItem;
6866 }
6867 return GCPtrTop;
6868}
6869
6870/** @} */
6871
6872
6873/** @name FPU access and helpers.
6874 *
6875 * @{
6876 */
6877
6878
6879/**
6880 * Hook for preparing to use the host FPU.
6881 *
6882 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6883 *
6884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6885 */
6886DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
6887{
6888#ifdef IN_RING3
6889 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6890#else
6891 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6892#endif
6893 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6894}
6895
6896
6897/**
6898 * Hook for preparing to use the host FPU for SSE.
6899 *
6900 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6901 *
6902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6903 */
6904DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
6905{
6906 iemFpuPrepareUsage(pVCpu);
6907}
6908
6909
6910/**
6911 * Hook for preparing to use the host FPU for AVX.
6912 *
6913 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6914 *
6915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6916 */
6917DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
6918{
6919 iemFpuPrepareUsage(pVCpu);
6920}
6921
6922
6923/**
6924 * Hook for actualizing the guest FPU state before the interpreter reads it.
6925 *
6926 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6927 *
6928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6929 */
6930DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
6931{
6932#ifdef IN_RING3
6933 NOREF(pVCpu);
6934#else
6935 CPUMRZFpuStateActualizeForRead(pVCpu);
6936#endif
6937 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6938}
6939
6940
6941/**
6942 * Hook for actualizing the guest FPU state before the interpreter changes it.
6943 *
6944 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6945 *
6946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6947 */
6948DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
6949{
6950#ifdef IN_RING3
6951 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6952#else
6953 CPUMRZFpuStateActualizeForChange(pVCpu);
6954#endif
6955 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6956}
6957
6958
6959/**
6960 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6961 * only.
6962 *
6963 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6964 *
6965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6966 */
6967DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
6968{
6969#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6970 NOREF(pVCpu);
6971#else
6972 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6973#endif
6974 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6975}
6976
6977
6978/**
6979 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6980 * read+write.
6981 *
6982 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6983 *
6984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6985 */
6986DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
6987{
6988#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6989 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6990#else
6991 CPUMRZFpuStateActualizeForChange(pVCpu);
6992#endif
6993 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6994
6995 /* Make sure any changes are loaded the next time around. */
6996 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->Hdr.bmXState |= XSAVE_C_SSE;
6997}
6998
6999
7000/**
7001 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7002 * only.
7003 *
7004 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7005 *
7006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7007 */
7008DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7009{
7010#ifdef IN_RING3
7011 NOREF(pVCpu);
7012#else
7013 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7014#endif
7015 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7016}
7017
7018
7019/**
7020 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7021 * read+write.
7022 *
7023 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7024 *
7025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7026 */
7027DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7028{
7029#ifdef IN_RING3
7030 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7031#else
7032 CPUMRZFpuStateActualizeForChange(pVCpu);
7033#endif
7034 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7035
7036 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
7037 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
7038}
7039
7040
7041/**
7042 * Stores a QNaN value into a FPU register.
7043 *
7044 * @param pReg Pointer to the register.
7045 */
7046DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7047{
7048 pReg->au32[0] = UINT32_C(0x00000000);
7049 pReg->au32[1] = UINT32_C(0xc0000000);
7050 pReg->au16[4] = UINT16_C(0xffff);
7051}
7052
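/*
 * Illustrative note: the three stores above assemble the 80-bit "real
 * indefinite" QNaN, i.e. sign/exponent word 0xffff and mantissa
 * 0xc000000000000000 (integer bit plus the top fraction bit set, the rest
 * clear).
 */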
7053
7054/**
7055 * Updates the FOP, FPU.CS and FPUIP registers.
7056 *
7057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7058 * @param pFpuCtx The FPU context.
7059 */
7060DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7061{
7062 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7063 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7064 /** @todo x87.CS and FPUIP need to be kept separately. */
7065 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7066 {
7067 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7068 * happens in real mode here based on the fnsave and fnstenv images. */
7069 pFpuCtx->CS = 0;
7070 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7071 }
7072 else
7073 {
7074 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7075 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7076 }
7077}
7078
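/*
 * Worked example for the real/V86 branch above: with CS=0x1234 and EIP=0x0010
 * the recorded FPUIP is 0x0010 | (0x1234 << 4) = 0x12350, i.e. the linear
 * address of the instruction, while the CS field itself is left zero.
 */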
7079
7080/**
7081 * Updates the x87.DS and FPUDP registers.
7082 *
7083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7084 * @param pFpuCtx The FPU context.
7085 * @param iEffSeg The effective segment register.
7086 * @param GCPtrEff The effective address relative to @a iEffSeg.
7087 */
7088DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7089{
7090 RTSEL sel;
7091 switch (iEffSeg)
7092 {
7093 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7094 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7095 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7096 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7097 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7098 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7099 default:
7100 AssertMsgFailed(("%d\n", iEffSeg));
7101 sel = pVCpu->cpum.GstCtx.ds.Sel;
7102 }
7103 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7104 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7105 {
7106 pFpuCtx->DS = 0;
7107 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7108 }
7109 else
7110 {
7111 pFpuCtx->DS = sel;
7112 pFpuCtx->FPUDP = GCPtrEff;
7113 }
7114}
7115
7116
7117/**
7118 * Rotates the stack registers in the push direction.
7119 *
7120 * @param pFpuCtx The FPU context.
7121 * @remarks This is a complete waste of time, but fxsave stores the registers in
7122 * stack order.
7123 */
7124DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7125{
7126 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7127 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7128 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7129 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7130 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7131 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7132 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7133 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7134 pFpuCtx->aRegs[0].r80 = r80Tmp;
7135}
7136
7137
7138/**
7139 * Rotates the stack registers in the pop direction.
7140 *
7141 * @param pFpuCtx The FPU context.
7142 * @remarks This is a complete waste of time, but fxsave stores the registers in
7143 * stack order.
7144 */
7145DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7146{
7147 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7148 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7149 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7150 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7151 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7152 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7153 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7154 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7155 pFpuCtx->aRegs[7].r80 = r80Tmp;
7156}
7157
7158
7159/**
7160 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7161 * exception prevents it.
7162 *
7163 * @param pResult The FPU operation result to push.
7164 * @param pFpuCtx The FPU context.
7165 */
7166IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7167{
7168 /* Update FSW and bail if there are pending exceptions afterwards. */
7169 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7170 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7171 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7172 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7173 {
7174 pFpuCtx->FSW = fFsw;
7175 return;
7176 }
7177
7178 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7179 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7180 {
7181 /* All is fine, push the actual value. */
7182 pFpuCtx->FTW |= RT_BIT(iNewTop);
7183 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7184 }
7185 else if (pFpuCtx->FCW & X86_FCW_IM)
7186 {
7187 /* Masked stack overflow, push QNaN. */
7188 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7189 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7190 }
7191 else
7192 {
7193 /* Raise stack overflow, don't push anything. */
7194 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7195 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7196 return;
7197 }
7198
7199 fFsw &= ~X86_FSW_TOP_MASK;
7200 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7201 pFpuCtx->FSW = fFsw;
7202
7203 iemFpuRotateStackPush(pFpuCtx);
7204}
7205
7206
7207/**
7208 * Stores a result in a FPU register and updates the FSW and FTW.
7209 *
7210 * @param pFpuCtx The FPU context.
7211 * @param pResult The result to store.
7212 * @param iStReg Which FPU register to store it in.
7213 */
7214IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7215{
7216 Assert(iStReg < 8);
7217 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7218 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7219 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7220 pFpuCtx->FTW |= RT_BIT(iReg);
7221 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7222}
7223
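/*
 * A minimal standalone sketch (not used by IEM) of the ST(i) to physical x87
 * slot mapping used above and by the FTW handling throughout this file. The
 * helper name is hypothetical.
 */
#if 0 /* example only */
static unsigned ExampleStRegToSlot(unsigned uTop, unsigned iStReg)
{
    return (uTop + iStReg) & 7;     /* e.g. TOP=6, ST(3) -> physical slot 1. */
}
#endif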
7224
7225/**
7226 * Only updates the FPU status word (FSW) with the result of the current
7227 * instruction.
7228 *
7229 * @param pFpuCtx The FPU context.
7230 * @param u16FSW The FSW output of the current instruction.
7231 */
7232IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7233{
7234 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7235 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7236}
7237
7238
7239/**
7240 * Pops one item off the FPU stack if no pending exception prevents it.
7241 *
7242 * @param pFpuCtx The FPU context.
7243 */
7244IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7245{
7246 /* Check pending exceptions. */
7247 uint16_t uFSW = pFpuCtx->FSW;
7248 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7249 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7250 return;
7251
7252 /* TOP--. */
7253 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7254 uFSW &= ~X86_FSW_TOP_MASK;
7255 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7256 pFpuCtx->FSW = uFSW;
7257
7258 /* Mark the previous ST0 as empty. */
7259 iOldTop >>= X86_FSW_TOP_SHIFT;
7260 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7261
7262 /* Rotate the registers. */
7263 iemFpuRotateStackPop(pFpuCtx);
7264}
7265
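/*
 * A minimal standalone sketch (not used by IEM) of the modulo-8 TOP
 * arithmetic used above and in the push paths: a push decrements TOP
 * (add 7 mod 8) while a pop increments it (add 9, i.e. +1, mod 8). The
 * helper names are hypothetical; assumes <stdint.h>.
 */
#if 0 /* example only */
static uint16_t ExampleTopAfterPush(uint16_t uTop) { return (uTop + 7) & 7; }
static uint16_t ExampleTopAfterPop(uint16_t uTop)  { return (uTop + 9) & 7; } /* same as (uTop + 1) & 7 */
#endif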
7266
7267/**
7268 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7269 *
7270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7271 * @param pResult The FPU operation result to push.
7272 */
7273IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7274{
7275 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7276 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7277 iemFpuMaybePushResult(pResult, pFpuCtx);
7278}
7279
7280
7281/**
7282 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7283 * and sets FPUDP and FPUDS.
7284 *
7285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7286 * @param pResult The FPU operation result to push.
7287 * @param iEffSeg The effective segment register.
7288 * @param GCPtrEff The effective address relative to @a iEffSeg.
7289 */
7290IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7291{
7292 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7293 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7294 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7295 iemFpuMaybePushResult(pResult, pFpuCtx);
7296}
7297
7298
7299/**
7300 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7301 * unless a pending exception prevents it.
7302 *
7303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7304 * @param pResult The FPU operation result to store and push.
7305 */
7306IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7307{
7308 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7309 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7310
7311 /* Update FSW and bail if there are pending exceptions afterwards. */
7312 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7313 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7314 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7315 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7316 {
7317 pFpuCtx->FSW = fFsw;
7318 return;
7319 }
7320
7321 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7322 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7323 {
7324 /* All is fine, push the actual value. */
7325 pFpuCtx->FTW |= RT_BIT(iNewTop);
7326 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7327 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7328 }
7329 else if (pFpuCtx->FCW & X86_FCW_IM)
7330 {
7331 /* Masked stack overflow, push QNaN. */
7332 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7333 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7334 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7335 }
7336 else
7337 {
7338 /* Raise stack overflow, don't push anything. */
7339 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7340 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7341 return;
7342 }
7343
7344 fFsw &= ~X86_FSW_TOP_MASK;
7345 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7346 pFpuCtx->FSW = fFsw;
7347
7348 iemFpuRotateStackPush(pFpuCtx);
7349}
7350
7351
7352/**
7353 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7354 * FOP.
7355 *
7356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7357 * @param pResult The result to store.
7358 * @param iStReg Which FPU register to store it in.
7359 */
7360IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7361{
7362 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7363 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7364 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7365}
7366
7367
7368/**
7369 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7370 * FOP, and then pops the stack.
7371 *
7372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7373 * @param pResult The result to store.
7374 * @param iStReg Which FPU register to store it in.
7375 */
7376IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7377{
7378 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7379 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7380 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7381 iemFpuMaybePopOne(pFpuCtx);
7382}
7383
7384
7385/**
7386 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7387 * FPUDP, and FPUDS.
7388 *
7389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7390 * @param pResult The result to store.
7391 * @param iStReg Which FPU register to store it in.
7392 * @param iEffSeg The effective memory operand selector register.
7393 * @param GCPtrEff The effective memory operand offset.
7394 */
7395IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7396 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7397{
7398 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7399 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7400 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7401 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7402}
7403
7404
7405/**
7406 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7407 * FPUDP, and FPUDS, and then pops the stack.
7408 *
7409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7410 * @param pResult The result to store.
7411 * @param iStReg Which FPU register to store it in.
7412 * @param iEffSeg The effective memory operand selector register.
7413 * @param GCPtrEff The effective memory operand offset.
7414 */
7415IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7416 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7417{
7418 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7419 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7420 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7421 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7422 iemFpuMaybePopOne(pFpuCtx);
7423}
7424
7425
7426/**
7427 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7428 *
7429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7430 */
7431IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7432{
7433 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7434 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7435}
7436
7437
7438/**
7439 * Marks the specified stack register as free (for FFREE).
7440 *
7441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7442 * @param iStReg The register to free.
7443 */
7444IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7445{
7446 Assert(iStReg < 8);
7447 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7448 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7449 pFpuCtx->FTW &= ~RT_BIT(iReg);
7450}
7451
7452
7453/**
7454 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7455 *
7456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7457 */
7458IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
7459{
7460 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7461 uint16_t uFsw = pFpuCtx->FSW;
7462 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7463 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7464 uFsw &= ~X86_FSW_TOP_MASK;
7465 uFsw |= uTop;
7466 pFpuCtx->FSW = uFsw;
7467}
7468
7469
7470/**
7471 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7472 *
7473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7474 */
7475IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
7476{
7477 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7478 uint16_t uFsw = pFpuCtx->FSW;
7479 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7480 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7481 uFsw &= ~X86_FSW_TOP_MASK;
7482 uFsw |= uTop;
7483 pFpuCtx->FSW = uFsw;
7484}
7485
7486
7487/**
7488 * Updates the FSW, FOP, FPUIP, and FPUCS.
7489 *
7490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7491 * @param u16FSW The FSW from the current instruction.
7492 */
7493IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
7494{
7495 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7496 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7497 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7498}
7499
7500
7501/**
7502 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7503 *
7504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7505 * @param u16FSW The FSW from the current instruction.
7506 */
7507IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7508{
7509 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7510 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7511 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7512 iemFpuMaybePopOne(pFpuCtx);
7513}
7514
7515
7516/**
7517 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7518 *
7519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7520 * @param u16FSW The FSW from the current instruction.
7521 * @param iEffSeg The effective memory operand selector register.
7522 * @param GCPtrEff The effective memory operand offset.
7523 */
7524IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7525{
7526 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7527 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7528 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7529 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7530}
7531
7532
7533/**
7534 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7535 *
7536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7537 * @param u16FSW The FSW from the current instruction.
7538 */
7539IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7540{
7541 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7542 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7543 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7544 iemFpuMaybePopOne(pFpuCtx);
7545 iemFpuMaybePopOne(pFpuCtx);
7546}
7547
7548
7549/**
7550 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7551 *
7552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7553 * @param u16FSW The FSW from the current instruction.
7554 * @param iEffSeg The effective memory operand selector register.
7555 * @param GCPtrEff The effective memory operand offset.
7556 */
7557IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7558{
7559 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7560 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7561 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7562 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7563 iemFpuMaybePopOne(pFpuCtx);
7564}
7565
7566
7567/**
7568 * Worker routine for raising an FPU stack underflow exception.
7569 *
7570 * @param pFpuCtx The FPU context.
7571 * @param iStReg The stack register being accessed.
7572 */
7573IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7574{
7575 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7576 if (pFpuCtx->FCW & X86_FCW_IM)
7577 {
7578 /* Masked underflow. */
7579 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7580 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7581 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7582 if (iStReg != UINT8_MAX)
7583 {
7584 pFpuCtx->FTW |= RT_BIT(iReg);
7585 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7586 }
7587 }
7588 else
7589 {
7590 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7591 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7592 }
7593}
7594
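/*
 * Illustrative summary of the worker above: with IM masked the destination
 * register (when given) receives the QNaN indefinite and IE+SF are set; with
 * IM unmasked nothing is stored and ES+B are set as well, so the pending
 * exception surfaces at the next waiting FPU instruction.
 */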
7595
7596/**
7597 * Raises a FPU stack underflow exception.
7598 *
7599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7600 * @param iStReg The destination register that should be loaded
7601 * with QNaN if \#IS is not masked. Specify
7602 * UINT8_MAX if none (like for fcom).
7603 */
7604DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
7605{
7606 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7607 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7608 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7609}
7610
7611
7612DECL_NO_INLINE(IEM_STATIC, void)
7613iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7614{
7615 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7616 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7617 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7618 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7619}
7620
7621
7622DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
7623{
7624 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7625 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7626 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7627 iemFpuMaybePopOne(pFpuCtx);
7628}
7629
7630
7631DECL_NO_INLINE(IEM_STATIC, void)
7632iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7633{
7634 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7635 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7636 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7637 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7638 iemFpuMaybePopOne(pFpuCtx);
7639}
7640
7641
7642DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
7643{
7644 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7645 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7646 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7647 iemFpuMaybePopOne(pFpuCtx);
7648 iemFpuMaybePopOne(pFpuCtx);
7649}
7650
7651
7652DECL_NO_INLINE(IEM_STATIC, void)
7653iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
7654{
7655 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7656 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7657
7658 if (pFpuCtx->FCW & X86_FCW_IM)
7659 {
7660 /* Masked underflow - Push QNaN. */
7661 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7662 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7663 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7664 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7665 pFpuCtx->FTW |= RT_BIT(iNewTop);
7666 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7667 iemFpuRotateStackPush(pFpuCtx);
7668 }
7669 else
7670 {
7671 /* Exception pending - don't change TOP or the register stack. */
7672 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7673 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7674 }
7675}
7676
7677
7678DECL_NO_INLINE(IEM_STATIC, void)
7679iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
7680{
7681 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7682 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7683
7684 if (pFpuCtx->FCW & X86_FCW_IM)
7685 {
7686 /* Masked underflow - Push QNaN. */
7687 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7688 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7689 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7690 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7691 pFpuCtx->FTW |= RT_BIT(iNewTop);
7692 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7693 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7694 iemFpuRotateStackPush(pFpuCtx);
7695 }
7696 else
7697 {
7698 /* Exception pending - don't change TOP or the register stack. */
7699 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7700 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7701 }
7702}
7703
7704
7705/**
7706 * Worker routine for raising an FPU stack overflow exception on a push.
7707 *
7708 * @param pFpuCtx The FPU context.
7709 */
7710IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7711{
7712 if (pFpuCtx->FCW & X86_FCW_IM)
7713 {
7714 /* Masked overflow. */
7715 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7716 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7717 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7718 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7719 pFpuCtx->FTW |= RT_BIT(iNewTop);
7720 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7721 iemFpuRotateStackPush(pFpuCtx);
7722 }
7723 else
7724 {
7725 /* Exception pending - don't change TOP or the register stack. */
7726 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7727 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7728 }
7729}
7730
7731
7732/**
7733 * Raises an FPU stack overflow exception on a push.
7734 *
7735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7736 */
7737DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
7738{
7739 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7740 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7741 iemFpuStackPushOverflowOnly(pFpuCtx);
7742}
7743
7744
7745/**
7746 * Raises an FPU stack overflow exception on a push with a memory operand.
7747 *
7748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7749 * @param iEffSeg The effective memory operand selector register.
7750 * @param GCPtrEff The effective memory operand offset.
7751 */
7752DECL_NO_INLINE(IEM_STATIC, void)
7753iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7754{
7755 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7756 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7757 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7758 iemFpuStackPushOverflowOnly(pFpuCtx);
7759}
7760
7761
7762IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
7763{
7764 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7765 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7766 if (pFpuCtx->FTW & RT_BIT(iReg))
7767 return VINF_SUCCESS;
7768 return VERR_NOT_FOUND;
7769}
7770
7771
7772IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7773{
7774 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7775 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7776 if (pFpuCtx->FTW & RT_BIT(iReg))
7777 {
7778 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7779 return VINF_SUCCESS;
7780 }
7781 return VERR_NOT_FOUND;
7782}
7783
7784
7785IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7786 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7787{
7788 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7789 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7790 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7791 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7792 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7793 {
7794 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7795 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7796 return VINF_SUCCESS;
7797 }
7798 return VERR_NOT_FOUND;
7799}
7800
7801
7802IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7803{
7804 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7805 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7806 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7807 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7808 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7809 {
7810 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7811 return VINF_SUCCESS;
7812 }
7813 return VERR_NOT_FOUND;
7814}
7815
7816
7817/**
7818 * Updates the FPU exception status after FCW is changed.
7819 *
7820 * @param pFpuCtx The FPU context.
7821 */
7822IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7823{
7824 uint16_t u16Fsw = pFpuCtx->FSW;
7825 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7826 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7827 else
7828 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7829 pFpuCtx->FSW = u16Fsw;
7830}
7831
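/*
 * A small worked example of the recalculation above (illustrative sketch only,
 * not part of this file; assumes a hypothetical caller with pFpuCtx in scope):
 * with an invalid-operation exception pending, unmasking it via FCW makes the
 * helper set ES and B, and masking it again clears them.
 */
#if 0
    pFpuCtx->FSW = X86_FSW_IE;                      /* #IA pending in the status word. */
    pFpuCtx->FCW &= ~X86_FCW_IM;                    /* Unmask invalid-operation exceptions... */
    iemFpuRecalcExceptionStatus(pFpuCtx);
    Assert(pFpuCtx->FSW == (X86_FSW_IE | X86_FSW_ES | X86_FSW_B));
    pFpuCtx->FCW |= X86_FCW_IM;                     /* ...and mask them again. */
    iemFpuRecalcExceptionStatus(pFpuCtx);
    Assert(pFpuCtx->FSW == X86_FSW_IE);
#endif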
7832
7833/**
7834 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7835 *
7836 * @returns The full FTW.
7837 * @param pFpuCtx The FPU context.
7838 */
7839IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7840{
7841 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7842 uint16_t u16Ftw = 0;
7843 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7844 for (unsigned iSt = 0; iSt < 8; iSt++)
7845 {
7846 unsigned const iReg = (iSt + iTop) & 7;
7847 if (!(u8Ftw & RT_BIT(iReg)))
7848 u16Ftw |= 3 << (iReg * 2); /* empty */
7849 else
7850 {
7851 uint16_t uTag;
7852 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7853 if (pr80Reg->s.uExponent == 0x7fff)
7854 uTag = 2; /* Exponent is all 1's => Special. */
7855 else if (pr80Reg->s.uExponent == 0x0000)
7856 {
7857 if (pr80Reg->s.u64Mantissa == 0x0000)
7858 uTag = 1; /* All bits are zero => Zero. */
7859 else
7860 uTag = 2; /* Must be special. */
7861 }
7862 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7863 uTag = 0; /* Valid. */
7864 else
7865 uTag = 2; /* Must be special. */
7866
7867 u16Ftw |= uTag << (iReg * 2);
7868 }
7869 }
7870
7871 return u16Ftw;
7872}
7873
7874
7875/**
7876 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7877 *
7878 * @returns The compressed FTW.
7879 * @param u16FullFtw The full FTW to convert.
7880 */
7881IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7882{
7883 uint8_t u8Ftw = 0;
7884 for (unsigned i = 0; i < 8; i++)
7885 {
7886 if ((u16FullFtw & 3) != 3 /*empty*/)
7887 u8Ftw |= RT_BIT(i);
7888 u16FullFtw >>= 2;
7889 }
7890
7891 return u8Ftw;
7892}
7893
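/*
 * A small worked example tying the two encodings above together (illustrative
 * sketch only, not part of this file; hypothetical register state): with TOP=6
 * and only ST(0)/ST(1) holding valid, normalized values, physical registers 6
 * and 7 are in use, so the full tag word is 0x0fff (regs 0..5 empty, 6..7
 * valid) and the abbreviated FXSAVE-style tag byte is 0xc0.
 */
#if 0
    Assert(iemFpuCompressFtw(UINT16_C(0x0fff)) == 0xc0);
#endif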
7894/** @} */
7895
7896
7897/** @name Memory access.
7898 *
7899 * @{
7900 */
7901
7902
7903/**
7904 * Updates the IEMCPU::cbWritten counter if applicable.
7905 *
7906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7907 * @param fAccess The access being accounted for.
7908 * @param cbMem The access size.
7909 */
7910DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
7911{
7912 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7913 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7914 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7915}
7916
7917
7918/**
7919 * Checks if the given segment can be written to, raising the appropriate
7920 * exception if not.
7921 *
7922 * @returns VBox strict status code.
7923 *
7924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7925 * @param pHid Pointer to the hidden register.
7926 * @param iSegReg The register number.
7927 * @param pu64BaseAddr Where to return the base address to use for the
7928 * segment. (In 64-bit code it may differ from the
7929 * base in the hidden segment.)
7930 */
7931IEM_STATIC VBOXSTRICTRC
7932iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7933{
7934 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7935
7936 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7937 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7938 else
7939 {
7940 if (!pHid->Attr.n.u1Present)
7941 {
7942 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7943 AssertRelease(uSel == 0);
7944 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7945 return iemRaiseGeneralProtectionFault0(pVCpu);
7946 }
7947
7948 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7949 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7950 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7951 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7952 *pu64BaseAddr = pHid->u64Base;
7953 }
7954 return VINF_SUCCESS;
7955}
7956
7957
7958/**
7959 * Checks if the given segment can be read from, raising the appropriate
7960 * exception if not.
7961 *
7962 * @returns VBox strict status code.
7963 *
7964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7965 * @param pHid Pointer to the hidden register.
7966 * @param iSegReg The register number.
7967 * @param pu64BaseAddr Where to return the base address to use for the
7968 * segment. (In 64-bit code it may differ from the
7969 * base in the hidden segment.)
7970 */
7971IEM_STATIC VBOXSTRICTRC
7972iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7973{
7974 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7975
7976 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7977 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7978 else
7979 {
7980 if (!pHid->Attr.n.u1Present)
7981 {
7982 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7983 AssertRelease(uSel == 0);
7984 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7985 return iemRaiseGeneralProtectionFault0(pVCpu);
7986 }
7987
7988 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7989 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7990 *pu64BaseAddr = pHid->u64Base;
7991 }
7992 return VINF_SUCCESS;
7993}
7994
7995
7996/**
7997 * Applies the segment limit, base and attributes.
7998 *
7999 * This may raise a \#GP or \#SS.
8000 *
8001 * @returns VBox strict status code.
8002 *
8003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8004 * @param fAccess The kind of access which is being performed.
8005 * @param iSegReg The index of the segment register to apply.
8006 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8007 * TSS, ++).
8008 * @param cbMem The access size.
8009 * @param pGCPtrMem Pointer to the guest memory address to apply
8010 * segmentation to. Input and output parameter.
8011 */
8012IEM_STATIC VBOXSTRICTRC
8013iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8014{
8015 if (iSegReg == UINT8_MAX)
8016 return VINF_SUCCESS;
8017
8018 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8019 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8020 switch (pVCpu->iem.s.enmCpuMode)
8021 {
8022 case IEMMODE_16BIT:
8023 case IEMMODE_32BIT:
8024 {
8025 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8026 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8027
8028 if ( pSel->Attr.n.u1Present
8029 && !pSel->Attr.n.u1Unusable)
8030 {
8031 Assert(pSel->Attr.n.u1DescType);
8032 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8033 {
8034 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8035 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8036 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8037
8038 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8039 {
8040 /** @todo CPL check. */
8041 }
8042
8043 /*
8044 * There are two kinds of data selectors, normal and expand down.
8045 */
8046 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8047 {
8048 if ( GCPtrFirst32 > pSel->u32Limit
8049 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8050 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8051 }
8052 else
8053 {
8054 /*
8055 * The upper boundary is defined by the B bit, not the G bit!
8056 */
8057 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8058 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8059 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8060 }
8061 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8062 }
8063 else
8064 {
8065
8066 /*
8067 * Code selectors can only be read through if they are marked readable;
8068 * writing is only permitted in real and V8086 mode.
8069 */
8070 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8071 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8072 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8073 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8074 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8075
8076 if ( GCPtrFirst32 > pSel->u32Limit
8077 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8078 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8079
8080 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8081 {
8082 /** @todo CPL check. */
8083 }
8084
8085 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8086 }
8087 }
8088 else
8089 return iemRaiseGeneralProtectionFault0(pVCpu);
8090 return VINF_SUCCESS;
8091 }
8092
8093 case IEMMODE_64BIT:
8094 {
8095 RTGCPTR GCPtrMem = *pGCPtrMem;
8096 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8097 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8098
8099 Assert(cbMem >= 1);
8100 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8101 return VINF_SUCCESS;
8102 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8103 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8104 return iemRaiseGeneralProtectionFault0(pVCpu);
8105 }
8106
8107 default:
8108 AssertFailedReturn(VERR_IEM_IPE_7);
8109 }
8110}
8111
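/*
 * A minimal usage sketch for the helper above (illustrative only, not part of
 * this file; GCPtrEff and the surrounding caller are hypothetical): the address
 * is adjusted in place, so on success it holds the linear address with the
 * segment base added and the limit/attribute checks already done.
 */
#if 0
    RTGCPTR GCPtrEff = 0x1000;                          /* hypothetical DS-relative offset */
    VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS,
                                               sizeof(uint32_t), &GCPtrEff);
    if (rcStrict == VINF_SUCCESS)
    { /* GCPtrEff is now the linear address, ready for page translation. */ }
#endif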
8112
8113/**
8114 * Translates a virtual address to a physical address and checks if we
8115 * can access the page as specified.
8116 *
8117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8118 * @param GCPtrMem The virtual address.
8119 * @param fAccess The intended access.
8120 * @param pGCPhysMem Where to return the physical address.
8121 */
8122IEM_STATIC VBOXSTRICTRC
8123iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8124{
8125 /** @todo Need a different PGM interface here. We're currently using
8126 * generic / REM interfaces. This won't cut it for R0. */
8127 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8128 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
8129 * here. */
8130 RTGCPHYS GCPhys;
8131 uint64_t fFlags;
8132 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8133 if (RT_FAILURE(rc))
8134 {
8135 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8136 /** @todo Check unassigned memory in unpaged mode. */
8137 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8138 *pGCPhysMem = NIL_RTGCPHYS;
8139 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8140 }
8141
8142 /* If the page is writable and does not have the no-exec bit set, all
8143 access is allowed. Otherwise we'll have to check more carefully... */
8144 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8145 {
8146 /* Write to read only memory? */
8147 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8148 && !(fFlags & X86_PTE_RW)
8149 && ( (pVCpu->iem.s.uCpl == 3
8150 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8151 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8152 {
8153 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8154 *pGCPhysMem = NIL_RTGCPHYS;
8155 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8156 }
8157
8158 /* Kernel memory accessed by userland? */
8159 if ( !(fFlags & X86_PTE_US)
8160 && pVCpu->iem.s.uCpl == 3
8161 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8162 {
8163 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8164 *pGCPhysMem = NIL_RTGCPHYS;
8165 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8166 }
8167
8168 /* Executing non-executable memory? */
8169 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8170 && (fFlags & X86_PTE_PAE_NX)
8171 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8172 {
8173 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8174 *pGCPhysMem = NIL_RTGCPHYS;
8175 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8176 VERR_ACCESS_DENIED);
8177 }
8178 }
8179
8180 /*
8181 * Set the dirty / access flags.
8182 * ASSUMES this is set when the address is translated rather than on commit...
8183 */
8184 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8185 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8186 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8187 {
8188 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8189 AssertRC(rc2);
8190 }
8191
8192 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8193 *pGCPhysMem = GCPhys;
8194 return VINF_SUCCESS;
8195}
8196
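/*
 * A minimal usage sketch for the helper above (illustrative only, not part of
 * this file; GCPtrLinear and the surrounding caller are hypothetical): on
 * success *pGCPhysMem holds the physical address with the page offset
 * preserved and the A/D bits updated.
 */
#if 0
    RTGCPHYS GCPhys;
    VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrLinear,
                                                              IEM_ACCESS_DATA_R, &GCPhys);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                                /* #PF has already been raised. */
#endif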
8197
8198
8199/**
8200 * Maps a physical page.
8201 *
8202 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8204 * @param GCPhysMem The physical address.
8205 * @param fAccess The intended access.
8206 * @param ppvMem Where to return the mapping address.
8207 * @param pLock The PGM lock.
8208 */
8209IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8210{
8211#ifdef IEM_LOG_MEMORY_WRITES
8212 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8213 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8214#endif
8215
8216 /** @todo This API may require some improving later. A private deal with PGM
8217 * regarding locking and unlocking needs to be struck. A couple of TLBs
8218 * living in PGM, but with publicly accessible inlined access methods
8219 * could perhaps be an even better solution. */
8220 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8221 GCPhysMem,
8222 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8223 pVCpu->iem.s.fBypassHandlers,
8224 ppvMem,
8225 pLock);
8226 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8227 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8228
8229 return rc;
8230}
8231
8232
8233/**
8234 * Unmaps a page previously mapped by iemMemPageMap.
8235 *
8236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8237 * @param GCPhysMem The physical address.
8238 * @param fAccess The intended access.
8239 * @param pvMem What iemMemPageMap returned.
8240 * @param pLock The PGM lock.
8241 */
8242DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8243{
8244 NOREF(pVCpu);
8245 NOREF(GCPhysMem);
8246 NOREF(fAccess);
8247 NOREF(pvMem);
8248 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8249}
8250
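/*
 * A minimal sketch of the map/unmap pairing above (illustrative only, not part
 * of this file; GCPhysMem and the surrounding caller are hypothetical): the PGM
 * lock taken by iemMemPageMap must be released with iemMemPageUnmap once the
 * host-side access is done.
 */
#if 0
    void           *pvMem;
    PGMPAGEMAPLOCK  Lock;
    int rc = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, &pvMem, &Lock);
    if (RT_SUCCESS(rc))
    {
        /* ... read from pvMem ... */
        iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, pvMem, &Lock);
    }
#endif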
8251
8252/**
8253 * Looks up a memory mapping entry.
8254 *
8255 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8257 * @param pvMem The memory address.
8258 * @param fAccess The kind of access.
8259 */
8260DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8261{
8262 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8263 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8264 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8265 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8266 return 0;
8267 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8268 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8269 return 1;
8270 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8271 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8272 return 2;
8273 return VERR_NOT_FOUND;
8274}
8275
8276
8277/**
8278 * Finds a free memmap entry when iNextMapping doesn't point to a free one.
8279 *
8280 * @returns Memory mapping index, 1024 on failure.
8281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8282 */
8283IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8284{
8285 /*
8286 * The easy case.
8287 */
8288 if (pVCpu->iem.s.cActiveMappings == 0)
8289 {
8290 pVCpu->iem.s.iNextMapping = 1;
8291 return 0;
8292 }
8293
8294 /* There should be enough mappings for all instructions. */
8295 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8296
8297 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8298 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8299 return i;
8300
8301 AssertFailedReturn(1024);
8302}
8303
8304
8305/**
8306 * Commits a bounce buffer that needs writing back and unmaps it.
8307 *
8308 * @returns Strict VBox status code.
8309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8310 * @param iMemMap The index of the buffer to commit.
8311 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8312 * Always false in ring-3, obviously.
8313 */
8314IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8315{
8316 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8317 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8318#ifdef IN_RING3
8319 Assert(!fPostponeFail);
8320 RT_NOREF_PV(fPostponeFail);
8321#endif
8322
8323 /*
8324 * Do the writing.
8325 */
8326 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8327 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8328 {
8329 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8330 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8331 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8332 if (!pVCpu->iem.s.fBypassHandlers)
8333 {
8334 /*
8335 * Carefully and efficiently dealing with access handler return
8336 * codes makes this a little bloated.
8337 */
8338 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8339 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8340 pbBuf,
8341 cbFirst,
8342 PGMACCESSORIGIN_IEM);
8343 if (rcStrict == VINF_SUCCESS)
8344 {
8345 if (cbSecond)
8346 {
8347 rcStrict = PGMPhysWrite(pVM,
8348 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8349 pbBuf + cbFirst,
8350 cbSecond,
8351 PGMACCESSORIGIN_IEM);
8352 if (rcStrict == VINF_SUCCESS)
8353 { /* nothing */ }
8354 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8355 {
8356 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8357 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8358 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8359 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8360 }
8361#ifndef IN_RING3
8362 else if (fPostponeFail)
8363 {
8364 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8365 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8366 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8367 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8368 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8369 return iemSetPassUpStatus(pVCpu, rcStrict);
8370 }
8371#endif
8372 else
8373 {
8374 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8375 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8376 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8377 return rcStrict;
8378 }
8379 }
8380 }
8381 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8382 {
8383 if (!cbSecond)
8384 {
8385 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8386 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8387 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8388 }
8389 else
8390 {
8391 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8393 pbBuf + cbFirst,
8394 cbSecond,
8395 PGMACCESSORIGIN_IEM);
8396 if (rcStrict2 == VINF_SUCCESS)
8397 {
8398 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8399 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8401 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8402 }
8403 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8404 {
8405 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8406 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8407 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8408 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8409 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8410 }
8411#ifndef IN_RING3
8412 else if (fPostponeFail)
8413 {
8414 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8416 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8417 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8418 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8419 return iemSetPassUpStatus(pVCpu, rcStrict);
8420 }
8421#endif
8422 else
8423 {
8424 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8427 return rcStrict2;
8428 }
8429 }
8430 }
8431#ifndef IN_RING3
8432 else if (fPostponeFail)
8433 {
8434 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8435 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8437 if (!cbSecond)
8438 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8439 else
8440 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8441 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8442 return iemSetPassUpStatus(pVCpu, rcStrict);
8443 }
8444#endif
8445 else
8446 {
8447 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8448 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8449 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8450 return rcStrict;
8451 }
8452 }
8453 else
8454 {
8455 /*
8456 * No access handlers, much simpler.
8457 */
8458 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8459 if (RT_SUCCESS(rc))
8460 {
8461 if (cbSecond)
8462 {
8463 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8464 if (RT_SUCCESS(rc))
8465 { /* likely */ }
8466 else
8467 {
8468 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8469 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8470 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8471 return rc;
8472 }
8473 }
8474 }
8475 else
8476 {
8477 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8480 return rc;
8481 }
8482 }
8483 }
8484
8485#if defined(IEM_LOG_MEMORY_WRITES)
8486 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8487 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8488 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8489 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8490 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8491 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8492
8493 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8494 g_cbIemWrote = cbWrote;
8495 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8496#endif
8497
8498 /*
8499 * Free the mapping entry.
8500 */
8501 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8502 Assert(pVCpu->iem.s.cActiveMappings != 0);
8503 pVCpu->iem.s.cActiveMappings--;
8504 return VINF_SUCCESS;
8505}
8506
8507
8508/**
8509 * iemMemMap worker that deals with a request crossing pages.
8510 */
8511IEM_STATIC VBOXSTRICTRC
8512iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8513{
8514 /*
8515 * Do the address translations.
8516 */
8517 RTGCPHYS GCPhysFirst;
8518 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8519 if (rcStrict != VINF_SUCCESS)
8520 return rcStrict;
8521
8522 RTGCPHYS GCPhysSecond;
8523 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8524 fAccess, &GCPhysSecond);
8525 if (rcStrict != VINF_SUCCESS)
8526 return rcStrict;
8527 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8528
8529 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8530
8531 /*
8532 * Read in the current memory content if it's a read, execute or partial
8533 * write access.
8534 */
8535 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8536 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8537 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8538
8539 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8540 {
8541 if (!pVCpu->iem.s.fBypassHandlers)
8542 {
8543 /*
8544 * Must carefully deal with access handler status codes here,
8545 * which makes the code a bit bloated.
8546 */
8547 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8548 if (rcStrict == VINF_SUCCESS)
8549 {
8550 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8551 if (rcStrict == VINF_SUCCESS)
8552 { /*likely */ }
8553 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8554 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8555 else
8556 {
8557 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8558 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8559 return rcStrict;
8560 }
8561 }
8562 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8563 {
8564 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8565 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8566 {
8567 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8569 }
8570 else
8571 {
8572 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8573 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8574 return rcStrict2;
8575 }
8576 }
8577 else
8578 {
8579 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8580 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8581 return rcStrict;
8582 }
8583 }
8584 else
8585 {
8586 /*
8587 * No informational status codes here, much more straightforward.
8588 */
8589 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8590 if (RT_SUCCESS(rc))
8591 {
8592 Assert(rc == VINF_SUCCESS);
8593 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8594 if (RT_SUCCESS(rc))
8595 Assert(rc == VINF_SUCCESS);
8596 else
8597 {
8598 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8599 return rc;
8600 }
8601 }
8602 else
8603 {
8604 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8605 return rc;
8606 }
8607 }
8608 }
8609#ifdef VBOX_STRICT
8610 else
8611 memset(pbBuf, 0xcc, cbMem);
8612 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8613 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8614#endif
8615
8616 /*
8617 * Commit the bounce buffer entry.
8618 */
8619 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8620 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8621 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8622 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8623 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8624 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8625 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8626 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8627 pVCpu->iem.s.cActiveMappings++;
8628
8629 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8630 *ppvMem = pbBuf;
8631 return VINF_SUCCESS;
8632}
8633
8634
8635/**
8636 * iemMemMap worker that deals with iemMemPageMap failures.
8637 */
8638IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8639 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8640{
8641 /*
8642 * Filter out conditions we can handle and the ones which shouldn't happen.
8643 */
8644 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8645 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8646 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8647 {
8648 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8649 return rcMap;
8650 }
8651 pVCpu->iem.s.cPotentialExits++;
8652
8653 /*
8654 * Read in the current memory content if it's a read, execute or partial
8655 * write access.
8656 */
8657 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8658 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8659 {
8660 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8661 memset(pbBuf, 0xff, cbMem);
8662 else
8663 {
8664 int rc;
8665 if (!pVCpu->iem.s.fBypassHandlers)
8666 {
8667 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8668 if (rcStrict == VINF_SUCCESS)
8669 { /* nothing */ }
8670 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8671 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8672 else
8673 {
8674 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8675 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8676 return rcStrict;
8677 }
8678 }
8679 else
8680 {
8681 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8682 if (RT_SUCCESS(rc))
8683 { /* likely */ }
8684 else
8685 {
8686 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8687 GCPhysFirst, rc));
8688 return rc;
8689 }
8690 }
8691 }
8692 }
8693#ifdef VBOX_STRICT
8694 else
8695 memset(pbBuf, 0xcc, cbMem);
8696#endif
8697#ifdef VBOX_STRICT
8698 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8699 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8700#endif
8701
8702 /*
8703 * Commit the bounce buffer entry.
8704 */
8705 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8706 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8707 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8708 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8709 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8710 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8711 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8712 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8713 pVCpu->iem.s.cActiveMappings++;
8714
8715 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8716 *ppvMem = pbBuf;
8717 return VINF_SUCCESS;
8718}
8719
8720
8721
8722/**
8723 * Maps the specified guest memory for the given kind of access.
8724 *
8725 * This may be using bounce buffering of the memory if it's crossing a page
8726 * boundary or if there is an access handler installed for any of it. Because
8727 * of lock prefix guarantees, we're in for some extra clutter when this
8728 * happens.
8729 *
8730 * This may raise a \#GP, \#SS, \#PF or \#AC.
8731 *
8732 * @returns VBox strict status code.
8733 *
8734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8735 * @param ppvMem Where to return the pointer to the mapped
8736 * memory.
8737 * @param cbMem The number of bytes to map. This is usually 1,
8738 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8739 * string operations it can be up to a page.
8740 * @param iSegReg The index of the segment register to use for
8741 * this access. The base and limits are checked.
8742 * Use UINT8_MAX to indicate that no segmentation
8743 * is required (for IDT, GDT and LDT accesses).
8744 * @param GCPtrMem The address of the guest memory.
8745 * @param fAccess How the memory is being accessed. The
8746 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8747 * how to map the memory, while the
8748 * IEM_ACCESS_WHAT_XXX bit is used when raising
8749 * exceptions.
8750 */
8751IEM_STATIC VBOXSTRICTRC
8752iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8753{
8754 /*
8755 * Check the input and figure out which mapping entry to use.
8756 */
8757 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8758 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8759 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8760
8761 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8762 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8763 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8764 {
8765 iMemMap = iemMemMapFindFree(pVCpu);
8766 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8767 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8768 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8769 pVCpu->iem.s.aMemMappings[2].fAccess),
8770 VERR_IEM_IPE_9);
8771 }
8772
8773 /*
8774 * Map the memory, checking that we can actually access it. If something
8775 * slightly complicated happens, fall back on bounce buffering.
8776 */
8777 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8778 if (rcStrict != VINF_SUCCESS)
8779 return rcStrict;
8780
8781 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8782 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8783
8784 RTGCPHYS GCPhysFirst;
8785 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8786 if (rcStrict != VINF_SUCCESS)
8787 return rcStrict;
8788
8789 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8790 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8791 if (fAccess & IEM_ACCESS_TYPE_READ)
8792 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8793
8794 void *pvMem;
8795 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8796 if (rcStrict != VINF_SUCCESS)
8797 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8798
8799 /*
8800 * Fill in the mapping table entry.
8801 */
8802 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8803 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8804 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8805 pVCpu->iem.s.cActiveMappings++;
8806
8807 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8808 *ppvMem = pvMem;
8809
8810 return VINF_SUCCESS;
8811}
8812
8813
8814/**
8815 * Commits the guest memory if bounce buffered and unmaps it.
8816 *
8817 * @returns Strict VBox status code.
8818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8819 * @param pvMem The mapping.
8820 * @param fAccess The kind of access.
8821 */
8822IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8823{
8824 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8825 AssertReturn(iMemMap >= 0, iMemMap);
8826
8827 /* If it's bounce buffered, we may need to write back the buffer. */
8828 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8829 {
8830 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8831 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8832 }
8833 /* Otherwise unlock it. */
8834 else
8835 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8836
8837 /* Free the entry. */
8838 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8839 Assert(pVCpu->iem.s.cActiveMappings != 0);
8840 pVCpu->iem.s.cActiveMappings--;
8841 return VINF_SUCCESS;
8842}
8843
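/*
 * A minimal sketch of the typical map/commit pairing used by the data accessors
 * below (illustrative only, not part of this file; GCPtrDst and u16Value are
 * hypothetical): a 2-byte data write through DS, committed afterwards so any
 * bounce buffering is flushed back to guest memory.
 */
#if 0
    RTGCPTR  const GCPtrDst = 0x1000;                   /* hypothetical effective address */
    uint16_t const u16Value = 0x1234;                   /* hypothetical value to store */
    uint16_t      *pu16Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
                                      X86_SREG_DS, GCPtrDst, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16Dst = u16Value;
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
    }
#endif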
8844#ifdef IEM_WITH_SETJMP
8845
8846/**
8847 * Maps the specified guest memory for the given kind of access, longjmp on
8848 * error.
8849 *
8850 * This may be using bounce buffering of the memory if it's crossing a page
8851 * boundary or if there is an access handler installed for any of it. Because
8852 * of lock prefix guarantees, we're in for some extra clutter when this
8853 * happens.
8854 *
8855 * This may raise a \#GP, \#SS, \#PF or \#AC.
8856 *
8857 * @returns Pointer to the mapped memory.
8858 *
8859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8860 * @param cbMem The number of bytes to map. This is usually 1,
8861 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8862 * string operations it can be up to a page.
8863 * @param iSegReg The index of the segment register to use for
8864 * this access. The base and limits are checked.
8865 * Use UINT8_MAX to indicate that no segmentation
8866 * is required (for IDT, GDT and LDT accesses).
8867 * @param GCPtrMem The address of the guest memory.
8868 * @param fAccess How the memory is being accessed. The
8869 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8870 * how to map the memory, while the
8871 * IEM_ACCESS_WHAT_XXX bit is used when raising
8872 * exceptions.
8873 */
8874IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8875{
8876 /*
8877 * Check the input and figure out which mapping entry to use.
8878 */
8879 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8880 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8881 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8882
8883 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8884 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8885 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8886 {
8887 iMemMap = iemMemMapFindFree(pVCpu);
8888 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8889 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8890 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8891 pVCpu->iem.s.aMemMappings[2].fAccess),
8892 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8893 }
8894
8895 /*
8896 * Map the memory, checking that we can actually access it. If something
8897 * slightly complicated happens, fall back on bounce buffering.
8898 */
8899 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8900 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8901 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8902
8903 /* Crossing a page boundary? */
8904 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8905 { /* No (likely). */ }
8906 else
8907 {
8908 void *pvMem;
8909 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8910 if (rcStrict == VINF_SUCCESS)
8911 return pvMem;
8912 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8913 }
8914
8915 RTGCPHYS GCPhysFirst;
8916 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8917 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8918 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8919
8920 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8921 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8922 if (fAccess & IEM_ACCESS_TYPE_READ)
8923 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8924
8925 void *pvMem;
8926 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8927 if (rcStrict == VINF_SUCCESS)
8928 { /* likely */ }
8929 else
8930 {
8931 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8932 if (rcStrict == VINF_SUCCESS)
8933 return pvMem;
8934 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8935 }
8936
8937 /*
8938 * Fill in the mapping table entry.
8939 */
8940 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8941 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8942 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8943 pVCpu->iem.s.cActiveMappings++;
8944
8945 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8946 return pvMem;
8947}
8948
8949
8950/**
8951 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8952 *
8953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8954 * @param pvMem The mapping.
8955 * @param fAccess The kind of access.
8956 */
8957IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8958{
8959 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8960 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8961
8962 /* If it's bounce buffered, we may need to write back the buffer. */
8963 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8964 {
8965 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8966 {
8967 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8968 if (rcStrict == VINF_SUCCESS)
8969 return;
8970 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8971 }
8972 }
8973 /* Otherwise unlock it. */
8974 else
8975 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8976
8977 /* Free the entry. */
8978 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8979 Assert(pVCpu->iem.s.cActiveMappings != 0);
8980 pVCpu->iem.s.cActiveMappings--;
8981}
8982
8983#endif /* IEM_WITH_SETJMP */
8984
8985#ifndef IN_RING3
8986/**
8987 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8988 * buffer part shows trouble, it will be postponed to ring-3 (sets FF and stuff).
8989 *
8990 * Allows the instruction to be completed and retired, while the IEM user will
8991 * return to ring-3 immediately afterwards and do the postponed writes there.
8992 *
8993 * @returns VBox status code (no strict statuses). Caller must check
8994 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8996 * @param pvMem The mapping.
8997 * @param fAccess The kind of access.
8998 */
8999IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9000{
9001 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9002 AssertReturn(iMemMap >= 0, iMemMap);
9003
9004 /* If it's bounce buffered, we may need to write back the buffer. */
9005 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9006 {
9007 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9008 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9009 }
9010 /* Otherwise unlock it. */
9011 else
9012 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9013
9014 /* Free the entry. */
9015 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9016 Assert(pVCpu->iem.s.cActiveMappings != 0);
9017 pVCpu->iem.s.cActiveMappings--;
9018 return VINF_SUCCESS;
9019}
9020#endif
9021
9022
9023/**
9024 * Rolls back mappings, releasing page locks and such.
9025 *
9026 * The caller shall only call this after checking cActiveMappings.
9027 *
9029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9030 */
9031IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9032{
9033 Assert(pVCpu->iem.s.cActiveMappings > 0);
9034
9035 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9036 while (iMemMap-- > 0)
9037 {
9038 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9039 if (fAccess != IEM_ACCESS_INVALID)
9040 {
9041 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9042 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9043 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9044 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9045 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9046 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9047 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9048 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9049 pVCpu->iem.s.cActiveMappings--;
9050 }
9051 }
9052}
9053
9054
9055/**
9056 * Fetches a data byte.
9057 *
9058 * @returns Strict VBox status code.
9059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9060 * @param pu8Dst Where to return the byte.
9061 * @param iSegReg The index of the segment register to use for
9062 * this access. The base and limits are checked.
9063 * @param GCPtrMem The address of the guest memory.
9064 */
9065IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9066{
9067 /* The lazy approach for now... */
9068 uint8_t const *pu8Src;
9069 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9070 if (rc == VINF_SUCCESS)
9071 {
9072 *pu8Dst = *pu8Src;
9073 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9074 }
9075 return rc;
9076}
9077
9078
9079#ifdef IEM_WITH_SETJMP
9080/**
9081 * Fetches a data byte, longjmp on error.
9082 *
9083 * @returns The byte.
9084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9085 * @param iSegReg The index of the segment register to use for
9086 * this access. The base and limits are checked.
9087 * @param GCPtrMem The address of the guest memory.
9088 */
9089DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9090{
9091 /* The lazy approach for now... */
9092 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9093 uint8_t const bRet = *pu8Src;
9094 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9095 return bRet;
9096}
9097#endif /* IEM_WITH_SETJMP */
9098
9099
9100/**
9101 * Fetches a data word.
9102 *
9103 * @returns Strict VBox status code.
9104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9105 * @param pu16Dst Where to return the word.
9106 * @param iSegReg The index of the segment register to use for
9107 * this access. The base and limits are checked.
9108 * @param GCPtrMem The address of the guest memory.
9109 */
9110IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9111{
9112 /* The lazy approach for now... */
9113 uint16_t const *pu16Src;
9114 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9115 if (rc == VINF_SUCCESS)
9116 {
9117 *pu16Dst = *pu16Src;
9118 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9119 }
9120 return rc;
9121}
9122
9123
9124#ifdef IEM_WITH_SETJMP
9125/**
9126 * Fetches a data word, longjmp on error.
9127 *
9128 * @returns The word
9129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9130 * @param iSegReg The index of the segment register to use for
9131 * this access. The base and limits are checked.
9132 * @param GCPtrMem The address of the guest memory.
9133 */
9134DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9135{
9136 /* The lazy approach for now... */
9137 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9138 uint16_t const u16Ret = *pu16Src;
9139 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9140 return u16Ret;
9141}
9142#endif
9143
9144
9145/**
9146 * Fetches a data dword.
9147 *
9148 * @returns Strict VBox status code.
9149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9150 * @param pu32Dst Where to return the dword.
9151 * @param iSegReg The index of the segment register to use for
9152 * this access. The base and limits are checked.
9153 * @param GCPtrMem The address of the guest memory.
9154 */
9155IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9156{
9157 /* The lazy approach for now... */
9158 uint32_t const *pu32Src;
9159 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9160 if (rc == VINF_SUCCESS)
9161 {
9162 *pu32Dst = *pu32Src;
9163 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9164 }
9165 return rc;
9166}
9167
9168
9169/**
9170 * Fetches a data dword and zero extends it to a qword.
9171 *
9172 * @returns Strict VBox status code.
9173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9174 * @param pu64Dst Where to return the qword.
9175 * @param iSegReg The index of the segment register to use for
9176 * this access. The base and limits are checked.
9177 * @param GCPtrMem The address of the guest memory.
9178 */
9179IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9180{
9181 /* The lazy approach for now... */
9182 uint32_t const *pu32Src;
9183 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9184 if (rc == VINF_SUCCESS)
9185 {
9186 *pu64Dst = *pu32Src;
9187 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9188 }
9189 return rc;
9190}
9191
9192
9193#ifdef IEM_WITH_SETJMP
9194
9195IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9196{
9197 Assert(cbMem >= 1);
9198 Assert(iSegReg < X86_SREG_COUNT);
9199
9200 /*
9201 * 64-bit mode is simpler.
9202 */
9203 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9204 {
9205 if (iSegReg >= X86_SREG_FS)
9206 {
9207 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9208 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9209 GCPtrMem += pSel->u64Base;
9210 }
9211
9212 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9213 return GCPtrMem;
9214 }
9215 /*
9216 * 16-bit and 32-bit segmentation.
9217 */
9218 else
9219 {
9220 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9221 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9222 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9223 == X86DESCATTR_P /* data, expand up */
9224 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9225 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9226 {
9227 /* expand up */
9228 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9229 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9230 && GCPtrLast32 > (uint32_t)GCPtrMem))
9231 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9232 }
9233 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9234 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9235 {
9236 /* expand down */
9237 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9238 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9239 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9240 && GCPtrLast32 > (uint32_t)GCPtrMem))
9241 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9242 }
9243 else
9244 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9245 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9246 }
9247 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9248}
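/*
 * Editorial note (not part of the original source): in 64-bit mode only FS and
 * GS contribute a segment base (e.g. FS.base 0x00007fff0000 plus offset 0x10
 * yields linear address 0x00007fff0010) and the only requirement is that the
 * whole access stays canonical.  In 16-bit and 32-bit code the hidden selector
 * attributes pick the check: expand-down data segments only allow offsets
 * strictly above the limit and below 64K or 4G (depending on the D/B bit),
 * while ordinary data and readable code segments are checked against the limit
 * itself.  Violations are reported through the longjmp-based raise helpers, so
 * this function never returns an error status.
 */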
9249
9250
9251IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9252{
9253 Assert(cbMem >= 1);
9254 Assert(iSegReg < X86_SREG_COUNT);
9255
9256 /*
9257 * 64-bit mode is simpler.
9258 */
9259 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9260 {
9261 if (iSegReg >= X86_SREG_FS)
9262 {
9263 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9264 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9265 GCPtrMem += pSel->u64Base;
9266 }
9267
9268 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9269 return GCPtrMem;
9270 }
9271 /*
9272 * 16-bit and 32-bit segmentation.
9273 */
9274 else
9275 {
9276 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9277 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9278 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9279 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9280 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9281 {
9282 /* expand up */
9283 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9284 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9285 && GCPtrLast32 > (uint32_t)GCPtrMem))
9286 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9287 }
9288 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9289 {
9290 /* expand down */
9291 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9292 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9293 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9294 && GCPtrLast32 > (uint32_t)GCPtrMem))
9295 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9296 }
9297 else
9298 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9299 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9300 }
9301 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9302}
9303
9304
9305/**
9306 * Fetches a data dword, longjmp on error, fallback/safe version.
9307 *
9308 * @returns The dword
9309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9310 * @param iSegReg The index of the segment register to use for
9311 * this access. The base and limits are checked.
9312 * @param GCPtrMem The address of the guest memory.
9313 */
9314IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9315{
9316 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9317 uint32_t const u32Ret = *pu32Src;
9318 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9319 return u32Ret;
9320}
9321
9322
9323/**
9324 * Fetches a data dword, longjmp on error.
9325 *
9326 * @returns The dword
9327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9328 * @param iSegReg The index of the segment register to use for
9329 * this access. The base and limits are checked.
9330 * @param GCPtrMem The address of the guest memory.
9331 */
9332DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9333{
9334# ifdef IEM_WITH_DATA_TLB
9335 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9336 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9337 {
9338 /// @todo more later.
9339 }
9340
9341 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9342# else
9343 /* The lazy approach. */
9344 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9345 uint32_t const u32Ret = *pu32Src;
9346 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9347 return u32Ret;
9348# endif
9349}
9350#endif
9351
9352
9353#ifdef SOME_UNUSED_FUNCTION
9354/**
9355 * Fetches a data dword and sign extends it to a qword.
9356 *
9357 * @returns Strict VBox status code.
9358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9359 * @param pu64Dst Where to return the sign extended value.
9360 * @param iSegReg The index of the segment register to use for
9361 * this access. The base and limits are checked.
9362 * @param GCPtrMem The address of the guest memory.
9363 */
9364IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9365{
9366 /* The lazy approach for now... */
9367 int32_t const *pi32Src;
9368 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9369 if (rc == VINF_SUCCESS)
9370 {
9371 *pu64Dst = *pi32Src;
9372 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9373 }
9374#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9375 else
9376 *pu64Dst = 0;
9377#endif
9378 return rc;
9379}
9380#endif
9381
9382
9383/**
9384 * Fetches a data qword.
9385 *
9386 * @returns Strict VBox status code.
9387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9388 * @param pu64Dst Where to return the qword.
9389 * @param iSegReg The index of the segment register to use for
9390 * this access. The base and limits are checked.
9391 * @param GCPtrMem The address of the guest memory.
9392 */
9393IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9394{
9395 /* The lazy approach for now... */
9396 uint64_t const *pu64Src;
9397 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9398 if (rc == VINF_SUCCESS)
9399 {
9400 *pu64Dst = *pu64Src;
9401 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9402 }
9403 return rc;
9404}
9405
9406
9407#ifdef IEM_WITH_SETJMP
9408/**
9409 * Fetches a data qword, longjmp on error.
9410 *
9411 * @returns The qword.
9412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9413 * @param iSegReg The index of the segment register to use for
9414 * this access. The base and limits are checked.
9415 * @param GCPtrMem The address of the guest memory.
9416 */
9417DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9418{
9419 /* The lazy approach for now... */
9420 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9421 uint64_t const u64Ret = *pu64Src;
9422 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9423 return u64Ret;
9424}
9425#endif
9426
9427
9428/**
9429 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9430 *
9431 * @returns Strict VBox status code.
9432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9433 * @param pu64Dst Where to return the qword.
9434 * @param iSegReg The index of the segment register to use for
9435 * this access. The base and limits are checked.
9436 * @param GCPtrMem The address of the guest memory.
9437 */
9438IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9439{
9440 /* The lazy approach for now... */
9441 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9442 if (RT_UNLIKELY(GCPtrMem & 15))
9443 return iemRaiseGeneralProtectionFault0(pVCpu);
9444
9445 uint64_t const *pu64Src;
9446 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9447 if (rc == VINF_SUCCESS)
9448 {
9449 *pu64Dst = *pu64Src;
9450 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9451 }
9452 return rc;
9453}
9454
9455
9456#ifdef IEM_WITH_SETJMP
9457/**
9458 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9459 *
9460 * @returns The qword.
9461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9462 * @param iSegReg The index of the segment register to use for
9463 * this access. The base and limits are checked.
9464 * @param GCPtrMem The address of the guest memory.
9465 */
9466DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9467{
9468 /* The lazy approach for now... */
9469 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9470 if (RT_LIKELY(!(GCPtrMem & 15)))
9471 {
9472 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9473 uint64_t const u64Ret = *pu64Src;
9474 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9475 return u64Ret;
9476 }
9477
9478 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9479 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9480}
9481#endif
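/*
 * Editorial note (not part of the original source): the error path above is the
 * pattern used by all the *Jmp accessors: the iemRaiseXxx helper produces the
 * strict status code for the exception and the value is delivered to the outer
 * setjmp frame via longjmp on pVCpu->iem.s.CTX_SUFF(pJmpBuf), so the function
 * itself never returns a failure to its caller.
 */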
9482
9483
9484/**
9485 * Fetches a data tword.
9486 *
9487 * @returns Strict VBox status code.
9488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9489 * @param pr80Dst Where to return the tword.
9490 * @param iSegReg The index of the segment register to use for
9491 * this access. The base and limits are checked.
9492 * @param GCPtrMem The address of the guest memory.
9493 */
9494IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9495{
9496 /* The lazy approach for now... */
9497 PCRTFLOAT80U pr80Src;
9498 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9499 if (rc == VINF_SUCCESS)
9500 {
9501 *pr80Dst = *pr80Src;
9502 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9503 }
9504 return rc;
9505}
9506
9507
9508#ifdef IEM_WITH_SETJMP
9509/**
9510 * Fetches a data tword, longjmp on error.
9511 *
9512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9513 * @param pr80Dst Where to return the tword.
9514 * @param iSegReg The index of the segment register to use for
9515 * this access. The base and limits are checked.
9516 * @param GCPtrMem The address of the guest memory.
9517 */
9518DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9519{
9520 /* The lazy approach for now... */
9521 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9522 *pr80Dst = *pr80Src;
9523 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9524}
9525#endif
9526
9527
9528/**
9529 * Fetches a data dqword (double qword), generally SSE related.
9530 *
9531 * @returns Strict VBox status code.
9532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9533 * @param pu128Dst Where to return the dqword.
9534 * @param iSegReg The index of the segment register to use for
9535 * this access. The base and limits are checked.
9536 * @param GCPtrMem The address of the guest memory.
9537 */
9538IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9539{
9540 /* The lazy approach for now... */
9541 PCRTUINT128U pu128Src;
9542 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9543 if (rc == VINF_SUCCESS)
9544 {
9545 pu128Dst->au64[0] = pu128Src->au64[0];
9546 pu128Dst->au64[1] = pu128Src->au64[1];
9547 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9548 }
9549 return rc;
9550}
9551
9552
9553#ifdef IEM_WITH_SETJMP
9554/**
9555 * Fetches a data dqword (double qword), generally SSE related.
9556 *
9557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9558 * @param pu128Dst Where to return the dqword.
9559 * @param iSegReg The index of the segment register to use for
9560 * this access. The base and limits are checked.
9561 * @param GCPtrMem The address of the guest memory.
9562 */
9563IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9564{
9565 /* The lazy approach for now... */
9566 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9567 pu128Dst->au64[0] = pu128Src->au64[0];
9568 pu128Dst->au64[1] = pu128Src->au64[1];
9569 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9570}
9571#endif
9572
9573
9574/**
9575 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9576 * related.
9577 *
9578 * Raises \#GP(0) if not aligned.
9579 *
9580 * @returns Strict VBox status code.
9581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9582 * @param pu128Dst Where to return the dqword.
9583 * @param iSegReg The index of the segment register to use for
9584 * this access. The base and limits are checked.
9585 * @param GCPtrMem The address of the guest memory.
9586 */
9587IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9588{
9589 /* The lazy approach for now... */
9590 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9591 if ( (GCPtrMem & 15)
9592 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9593 return iemRaiseGeneralProtectionFault0(pVCpu);
9594
9595 PCRTUINT128U pu128Src;
9596 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9597 if (rc == VINF_SUCCESS)
9598 {
9599 pu128Dst->au64[0] = pu128Src->au64[0];
9600 pu128Dst->au64[1] = pu128Src->au64[1];
9601 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9602 }
9603 return rc;
9604}
9605
9606
9607#ifdef IEM_WITH_SETJMP
9608/**
9609 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9610 * related, longjmp on error.
9611 *
9612 * Raises \#GP(0) if not aligned.
9613 *
9614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9615 * @param pu128Dst Where to return the dqword.
9616 * @param iSegReg The index of the segment register to use for
9617 * this access. The base and limits are checked.
9618 * @param GCPtrMem The address of the guest memory.
9619 */
9620DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9621{
9622 /* The lazy approach for now... */
9623 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9624 if ( (GCPtrMem & 15) == 0
9625 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9626 {
9627 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9628 pu128Dst->au64[0] = pu128Src->au64[0];
9629 pu128Dst->au64[1] = pu128Src->au64[1];
9630 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9631 return;
9632 }
9633
9634 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9635 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9636}
9637#endif
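/*
 * Editorial note (not part of the original source): the X86_MXCSR_MM test in
 * the two aligned fetchers above refers to AMD's misaligned-SSE mode bit; when
 * the guest sets it, the usual 16-byte alignment requirement is waived and no
 * \#GP(0) is raised for a misaligned access.
 */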
9638
9639
9640/**
9641 * Fetches a data oword (octo word), generally AVX related.
9642 *
9643 * @returns Strict VBox status code.
9644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9645 * @param pu256Dst Where to return the oword.
9646 * @param iSegReg The index of the segment register to use for
9647 * this access. The base and limits are checked.
9648 * @param GCPtrMem The address of the guest memory.
9649 */
9650IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9651{
9652 /* The lazy approach for now... */
9653 PCRTUINT256U pu256Src;
9654 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9655 if (rc == VINF_SUCCESS)
9656 {
9657 pu256Dst->au64[0] = pu256Src->au64[0];
9658 pu256Dst->au64[1] = pu256Src->au64[1];
9659 pu256Dst->au64[2] = pu256Src->au64[2];
9660 pu256Dst->au64[3] = pu256Src->au64[3];
9661 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9662 }
9663 return rc;
9664}
9665
9666
9667#ifdef IEM_WITH_SETJMP
9668/**
9669 * Fetches a data oword (octo word), generally AVX related.
9670 *
9671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9672 * @param pu256Dst Where to return the oword.
9673 * @param iSegReg The index of the segment register to use for
9674 * this access. The base and limits are checked.
9675 * @param GCPtrMem The address of the guest memory.
9676 */
9677IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9678{
9679 /* The lazy approach for now... */
9680 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9681 pu256Dst->au64[0] = pu256Src->au64[0];
9682 pu256Dst->au64[1] = pu256Src->au64[1];
9683 pu256Dst->au64[2] = pu256Src->au64[2];
9684 pu256Dst->au64[3] = pu256Src->au64[3];
9685 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9686}
9687#endif
9688
9689
9690/**
9691 * Fetches a data oword (octo word) at an aligned address, generally AVX
9692 * related.
9693 *
9694 * Raises \#GP(0) if not aligned.
9695 *
9696 * @returns Strict VBox status code.
9697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9698 * @param pu256Dst Where to return the oword.
9699 * @param iSegReg The index of the segment register to use for
9700 * this access. The base and limits are checked.
9701 * @param GCPtrMem The address of the guest memory.
9702 */
9703IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9704{
9705 /* The lazy approach for now... */
9706 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9707 if (GCPtrMem & 31)
9708 return iemRaiseGeneralProtectionFault0(pVCpu);
9709
9710 PCRTUINT256U pu256Src;
9711 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9712 if (rc == VINF_SUCCESS)
9713 {
9714 pu256Dst->au64[0] = pu256Src->au64[0];
9715 pu256Dst->au64[1] = pu256Src->au64[1];
9716 pu256Dst->au64[2] = pu256Src->au64[2];
9717 pu256Dst->au64[3] = pu256Src->au64[3];
9718 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9719 }
9720 return rc;
9721}
9722
9723
9724#ifdef IEM_WITH_SETJMP
9725/**
9726 * Fetches a data oword (octo word) at an aligned address, generally AVX
9727 * related, longjmp on error.
9728 *
9729 * Raises \#GP(0) if not aligned.
9730 *
9731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9732 * @param pu256Dst Where to return the oword.
9733 * @param iSegReg The index of the segment register to use for
9734 * this access. The base and limits are checked.
9735 * @param GCPtrMem The address of the guest memory.
9736 */
9737DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9738{
9739 /* The lazy approach for now... */
9740 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9741 if ((GCPtrMem & 31) == 0)
9742 {
9743 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9744 pu256Dst->au64[0] = pu256Src->au64[0];
9745 pu256Dst->au64[1] = pu256Src->au64[1];
9746 pu256Dst->au64[2] = pu256Src->au64[2];
9747 pu256Dst->au64[3] = pu256Src->au64[3];
9748 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9749 return;
9750 }
9751
9752 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9753 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9754}
9755#endif
9756
9757
9758
9759/**
9760 * Fetches a descriptor register (lgdt, lidt).
9761 *
9762 * @returns Strict VBox status code.
9763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9764 * @param pcbLimit Where to return the limit.
9765 * @param pGCPtrBase Where to return the base.
9766 * @param iSegReg The index of the segment register to use for
9767 * this access. The base and limits are checked.
9768 * @param GCPtrMem The address of the guest memory.
9769 * @param enmOpSize The effective operand size.
9770 */
9771IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9772 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9773{
9774 /*
9775 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9776 * little special:
9777 * - The two reads are done separately.
9778 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9779 * - We suspect the 386 to actually commit the limit before the base in
9780 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9781 * don't try to emulate this eccentric behavior, because it's not well
9782 * enough understood and rather hard to trigger.
9783 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9784 */
9785 VBOXSTRICTRC rcStrict;
9786 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9787 {
9788 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9789 if (rcStrict == VINF_SUCCESS)
9790 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9791 }
9792 else
9793 {
9794 uint32_t uTmp = 0; /* (Silences a Visual C++ "maybe used uninitialized" warning.) */
9795 if (enmOpSize == IEMMODE_32BIT)
9796 {
9797 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9798 {
9799 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9800 if (rcStrict == VINF_SUCCESS)
9801 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9802 }
9803 else
9804 {
9805 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9806 if (rcStrict == VINF_SUCCESS)
9807 {
9808 *pcbLimit = (uint16_t)uTmp;
9809 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9810 }
9811 }
9812 if (rcStrict == VINF_SUCCESS)
9813 *pGCPtrBase = uTmp;
9814 }
9815 else
9816 {
9817 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9818 if (rcStrict == VINF_SUCCESS)
9819 {
9820 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9821 if (rcStrict == VINF_SUCCESS)
9822 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9823 }
9824 }
9825 }
9826 return rcStrict;
9827}
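/*
 * Editorial note (not part of the original source): the pseudo-descriptor read
 * above has this layout, matching the two separate accesses performed:
 *
 *      offset 0..1   limit (16 bits)
 *      offset 2..5   base  (32 bits; only the low 24 bits are used with a
 *                          16-bit operand size outside 64-bit mode)
 *      offset 2..9   base  (64 bits, 64-bit mode only)
 */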
9828
9829
9830
9831/**
9832 * Stores a data byte.
9833 *
9834 * @returns Strict VBox status code.
9835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9836 * @param iSegReg The index of the segment register to use for
9837 * this access. The base and limits are checked.
9838 * @param GCPtrMem The address of the guest memory.
9839 * @param u8Value The value to store.
9840 */
9841IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9842{
9843 /* The lazy approach for now... */
9844 uint8_t *pu8Dst;
9845 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9846 if (rc == VINF_SUCCESS)
9847 {
9848 *pu8Dst = u8Value;
9849 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9850 }
9851 return rc;
9852}
9853
9854
9855#ifdef IEM_WITH_SETJMP
9856/**
9857 * Stores a data byte, longjmp on error.
9858 *
9859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9860 * @param iSegReg The index of the segment register to use for
9861 * this access. The base and limits are checked.
9862 * @param GCPtrMem The address of the guest memory.
9863 * @param u8Value The value to store.
9864 */
9865IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9866{
9867 /* The lazy approach for now... */
9868 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9869 *pu8Dst = u8Value;
9870 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9871}
9872#endif
9873
9874
9875/**
9876 * Stores a data word.
9877 *
9878 * @returns Strict VBox status code.
9879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9880 * @param iSegReg The index of the segment register to use for
9881 * this access. The base and limits are checked.
9882 * @param GCPtrMem The address of the guest memory.
9883 * @param u16Value The value to store.
9884 */
9885IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9886{
9887 /* The lazy approach for now... */
9888 uint16_t *pu16Dst;
9889 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9890 if (rc == VINF_SUCCESS)
9891 {
9892 *pu16Dst = u16Value;
9893 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9894 }
9895 return rc;
9896}
9897
9898
9899#ifdef IEM_WITH_SETJMP
9900/**
9901 * Stores a data word, longjmp on error.
9902 *
9903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9904 * @param iSegReg The index of the segment register to use for
9905 * this access. The base and limits are checked.
9906 * @param GCPtrMem The address of the guest memory.
9907 * @param u16Value The value to store.
9908 */
9909IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9910{
9911 /* The lazy approach for now... */
9912 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9913 *pu16Dst = u16Value;
9914 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9915}
9916#endif
9917
9918
9919/**
9920 * Stores a data dword.
9921 *
9922 * @returns Strict VBox status code.
9923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9924 * @param iSegReg The index of the segment register to use for
9925 * this access. The base and limits are checked.
9926 * @param GCPtrMem The address of the guest memory.
9927 * @param u32Value The value to store.
9928 */
9929IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9930{
9931 /* The lazy approach for now... */
9932 uint32_t *pu32Dst;
9933 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9934 if (rc == VINF_SUCCESS)
9935 {
9936 *pu32Dst = u32Value;
9937 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9938 }
9939 return rc;
9940}
9941
9942
9943#ifdef IEM_WITH_SETJMP
9944/**
9945 * Stores a data dword, longjmp on error.
9946 *
9948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9949 * @param iSegReg The index of the segment register to use for
9950 * this access. The base and limits are checked.
9951 * @param GCPtrMem The address of the guest memory.
9952 * @param u32Value The value to store.
9953 */
9954IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9955{
9956 /* The lazy approach for now... */
9957 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9958 *pu32Dst = u32Value;
9959 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9960}
9961#endif
9962
9963
9964/**
9965 * Stores a data qword.
9966 *
9967 * @returns Strict VBox status code.
9968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9969 * @param iSegReg The index of the segment register to use for
9970 * this access. The base and limits are checked.
9971 * @param GCPtrMem The address of the guest memory.
9972 * @param u64Value The value to store.
9973 */
9974IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9975{
9976 /* The lazy approach for now... */
9977 uint64_t *pu64Dst;
9978 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9979 if (rc == VINF_SUCCESS)
9980 {
9981 *pu64Dst = u64Value;
9982 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9983 }
9984 return rc;
9985}
9986
9987
9988#ifdef IEM_WITH_SETJMP
9989/**
9990 * Stores a data qword, longjmp on error.
9991 *
9992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9993 * @param iSegReg The index of the segment register to use for
9994 * this access. The base and limits are checked.
9995 * @param GCPtrMem The address of the guest memory.
9996 * @param u64Value The value to store.
9997 */
9998IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9999{
10000 /* The lazy approach for now... */
10001 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10002 *pu64Dst = u64Value;
10003 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10004}
10005#endif
10006
10007
10008/**
10009 * Stores a data dqword.
10010 *
10011 * @returns Strict VBox status code.
10012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10013 * @param iSegReg The index of the segment register to use for
10014 * this access. The base and limits are checked.
10015 * @param GCPtrMem The address of the guest memory.
10016 * @param u128Value The value to store.
10017 */
10018IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10019{
10020 /* The lazy approach for now... */
10021 PRTUINT128U pu128Dst;
10022 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10023 if (rc == VINF_SUCCESS)
10024 {
10025 pu128Dst->au64[0] = u128Value.au64[0];
10026 pu128Dst->au64[1] = u128Value.au64[1];
10027 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10028 }
10029 return rc;
10030}
10031
10032
10033#ifdef IEM_WITH_SETJMP
10034/**
10035 * Stores a data dqword, longjmp on error.
10036 *
10037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10038 * @param iSegReg The index of the segment register to use for
10039 * this access. The base and limits are checked.
10040 * @param GCPtrMem The address of the guest memory.
10041 * @param u128Value The value to store.
10042 */
10043IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10044{
10045 /* The lazy approach for now... */
10046 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10047 pu128Dst->au64[0] = u128Value.au64[0];
10048 pu128Dst->au64[1] = u128Value.au64[1];
10049 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10050}
10051#endif
10052
10053
10054/**
10055 * Stores a data dqword, SSE aligned.
10056 *
10057 * @returns Strict VBox status code.
10058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10059 * @param iSegReg The index of the segment register to use for
10060 * this access. The base and limits are checked.
10061 * @param GCPtrMem The address of the guest memory.
10062 * @param u128Value The value to store.
10063 */
10064IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10065{
10066 /* The lazy approach for now... */
10067 if ( (GCPtrMem & 15)
10068 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10069 return iemRaiseGeneralProtectionFault0(pVCpu);
10070
10071 PRTUINT128U pu128Dst;
10072 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10073 if (rc == VINF_SUCCESS)
10074 {
10075 pu128Dst->au64[0] = u128Value.au64[0];
10076 pu128Dst->au64[1] = u128Value.au64[1];
10077 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10078 }
10079 return rc;
10080}
10081
10082
10083#ifdef IEM_WITH_SETJMP
10084/**
10085 * Stores a data dqword, SSE aligned, longjmp on error.
10086 *
10088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10089 * @param iSegReg The index of the segment register to use for
10090 * this access. The base and limits are checked.
10091 * @param GCPtrMem The address of the guest memory.
10092 * @param u128Value The value to store.
10093 */
10094DECL_NO_INLINE(IEM_STATIC, void)
10095iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10096{
10097 /* The lazy approach for now... */
10098 if ( (GCPtrMem & 15) == 0
10099 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10100 {
10101 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10102 pu128Dst->au64[0] = u128Value.au64[0];
10103 pu128Dst->au64[1] = u128Value.au64[1];
10104 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10105 return;
10106 }
10107
10108 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10109 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10110}
10111#endif
10112
10113
10114/**
10115 * Stores a data oword (octo word).
10116 *
10117 * @returns Strict VBox status code.
10118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10119 * @param iSegReg The index of the segment register to use for
10120 * this access. The base and limits are checked.
10121 * @param GCPtrMem The address of the guest memory.
10122 * @param pu256Value Pointer to the value to store.
10123 */
10124IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10125{
10126 /* The lazy approach for now... */
10127 PRTUINT256U pu256Dst;
10128 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10129 if (rc == VINF_SUCCESS)
10130 {
10131 pu256Dst->au64[0] = pu256Value->au64[0];
10132 pu256Dst->au64[1] = pu256Value->au64[1];
10133 pu256Dst->au64[2] = pu256Value->au64[2];
10134 pu256Dst->au64[3] = pu256Value->au64[3];
10135 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10136 }
10137 return rc;
10138}
10139
10140
10141#ifdef IEM_WITH_SETJMP
10142/**
10143 * Stores a data oword (octo word), longjmp on error.
10144 *
10145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10146 * @param iSegReg The index of the segment register to use for
10147 * this access. The base and limits are checked.
10148 * @param GCPtrMem The address of the guest memory.
10149 * @param pu256Value Pointer to the value to store.
10150 */
10151IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10152{
10153 /* The lazy approach for now... */
10154 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10155 pu256Dst->au64[0] = pu256Value->au64[0];
10156 pu256Dst->au64[1] = pu256Value->au64[1];
10157 pu256Dst->au64[2] = pu256Value->au64[2];
10158 pu256Dst->au64[3] = pu256Value->au64[3];
10159 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10160}
10161#endif
10162
10163
10164/**
10165 * Stores a data oword (octo word), AVX aligned.
10166 *
10167 * @returns Strict VBox status code.
10168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10169 * @param iSegReg The index of the segment register to use for
10170 * this access. The base and limits are checked.
10171 * @param GCPtrMem The address of the guest memory.
10172 * @param pu256Value Pointer to the value to store.
10173 */
10174IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10175{
10176 /* The lazy approach for now... */
10177 if (GCPtrMem & 31)
10178 return iemRaiseGeneralProtectionFault0(pVCpu);
10179
10180 PRTUINT256U pu256Dst;
10181 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10182 if (rc == VINF_SUCCESS)
10183 {
10184 pu256Dst->au64[0] = pu256Value->au64[0];
10185 pu256Dst->au64[1] = pu256Value->au64[1];
10186 pu256Dst->au64[2] = pu256Value->au64[2];
10187 pu256Dst->au64[3] = pu256Value->au64[3];
10188 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10189 }
10190 return rc;
10191}
10192
10193
10194#ifdef IEM_WITH_SETJMP
10195/**
10196 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10197 *
10199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10200 * @param iSegReg The index of the segment register to use for
10201 * this access. The base and limits are checked.
10202 * @param GCPtrMem The address of the guest memory.
10203 * @param pu256Value Pointer to the value to store.
10204 */
10205DECL_NO_INLINE(IEM_STATIC, void)
10206iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10207{
10208 /* The lazy approach for now... */
10209 if ((GCPtrMem & 31) == 0)
10210 {
10211 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10212 pu256Dst->au64[0] = pu256Value->au64[0];
10213 pu256Dst->au64[1] = pu256Value->au64[1];
10214 pu256Dst->au64[2] = pu256Value->au64[2];
10215 pu256Dst->au64[3] = pu256Value->au64[3];
10216 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10217 return;
10218 }
10219
10220 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10221 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10222}
10223#endif
10224
10225
10226/**
10227 * Stores a descriptor register (sgdt, sidt).
10228 *
10229 * @returns Strict VBox status code.
10230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10231 * @param cbLimit The limit.
10232 * @param GCPtrBase The base address.
10233 * @param iSegReg The index of the segment register to use for
10234 * this access. The base and limits are checked.
10235 * @param GCPtrMem The address of the guest memory.
10236 */
10237IEM_STATIC VBOXSTRICTRC
10238iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10239{
10240 /*
10241 * The SIDT and SGDT instructions actually store the data using two
10242 * independent writes; the instructions do not respond to opsize prefixes.
 * (The 0xff000000 in the 286 case below reflects the 286 storing 0FFh in the
 * undefined high byte of the base.)
10243 */
10244 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10245 if (rcStrict == VINF_SUCCESS)
10246 {
10247 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10248 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10249 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10250 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10251 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10252 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10253 else
10254 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10255 }
10256 return rcStrict;
10257}
10258
10259
10260/**
10261 * Pushes a word onto the stack.
10262 *
10263 * @returns Strict VBox status code.
10264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10265 * @param u16Value The value to push.
10266 */
10267IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10268{
10269 /* Decrement the stack pointer. */
10270 uint64_t uNewRsp;
10271 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10272
10273 /* Write the word the lazy way. */
10274 uint16_t *pu16Dst;
10275 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10276 if (rc == VINF_SUCCESS)
10277 {
10278 *pu16Dst = u16Value;
10279 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10280 }
10281
10282 /* Commit the new RSP value unless an access handler made trouble. */
10283 if (rc == VINF_SUCCESS)
10284 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10285
10286 return rc;
10287}
10288
10289
10290/**
10291 * Pushes a dword onto the stack.
10292 *
10293 * @returns Strict VBox status code.
10294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10295 * @param u32Value The value to push.
10296 */
10297IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10298{
10299 /* Decrement the stack pointer. */
10300 uint64_t uNewRsp;
10301 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10302
10303 /* Write the dword the lazy way. */
10304 uint32_t *pu32Dst;
10305 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10306 if (rc == VINF_SUCCESS)
10307 {
10308 *pu32Dst = u32Value;
10309 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10310 }
10311
10312 /* Commit the new RSP value unless an access handler made trouble. */
10313 if (rc == VINF_SUCCESS)
10314 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10315
10316 return rc;
10317}
10318
10319
10320/**
10321 * Pushes a dword segment register value onto the stack.
10322 *
10323 * @returns Strict VBox status code.
10324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10325 * @param u32Value The value to push.
10326 */
10327IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10328{
10329 /* Decrement the stack pointer. */
10330 uint64_t uNewRsp;
10331 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10332
10333 /* The Intel docs talk about zero extending the selector register
10334 value. My actual Intel CPU here might be zero extending the value,
10335 but it still only writes the lower word... */
10336 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10337 * happens when crossing an electric page boundary, is the high word checked
10338 * for write accessibility or not? Probably it is. What about segment limits?
10339 * It appears this behavior is also shared with trap error codes.
10340 *
10341 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
10342 * on ancient hardware to find out when it actually changed. */
10343 uint16_t *pu16Dst;
10344 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10345 if (rc == VINF_SUCCESS)
10346 {
10347 *pu16Dst = (uint16_t)u32Value;
10348 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10349 }
10350
10351 /* Commit the new RSP value unless an access handler made trouble. */
10352 if (rc == VINF_SUCCESS)
10353 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10354
10355 return rc;
10356}
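/*
 * Editorial note (not part of the original source): concretely, a push of DS
 * with a 32-bit operand size handled by the function above decrements ESP by
 * four but only stores the low word of the new stack slot; the upper word is
 * left as it was, which is why the mapping uses IEM_ACCESS_STACK_RW instead of
 * a plain write.
 */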
10357
10358
10359/**
10360 * Pushes a qword onto the stack.
10361 *
10362 * @returns Strict VBox status code.
10363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10364 * @param u64Value The value to push.
10365 */
10366IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10367{
10368 /* Decrement the stack pointer. */
10369 uint64_t uNewRsp;
10370 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10371
10372 /* Write the qword the lazy way. */
10373 uint64_t *pu64Dst;
10374 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10375 if (rc == VINF_SUCCESS)
10376 {
10377 *pu64Dst = u64Value;
10378 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10379 }
10380
10381 /* Commit the new RSP value unless an access handler made trouble. */
10382 if (rc == VINF_SUCCESS)
10383 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10384
10385 return rc;
10386}
10387
10388
10389/**
10390 * Pops a word from the stack.
10391 *
10392 * @returns Strict VBox status code.
10393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10394 * @param pu16Value Where to store the popped value.
10395 */
10396IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10397{
10398 /* Increment the stack pointer. */
10399 uint64_t uNewRsp;
10400 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10401
10402 /* Fetch the word the lazy way. */
10403 uint16_t const *pu16Src;
10404 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10405 if (rc == VINF_SUCCESS)
10406 {
10407 *pu16Value = *pu16Src;
10408 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10409
10410 /* Commit the new RSP value. */
10411 if (rc == VINF_SUCCESS)
10412 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10413 }
10414
10415 return rc;
10416}
10417
10418
10419/**
10420 * Pops a dword from the stack.
10421 *
10422 * @returns Strict VBox status code.
10423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10424 * @param pu32Value Where to store the popped value.
10425 */
10426IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10427{
10428 /* Increment the stack pointer. */
10429 uint64_t uNewRsp;
10430 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10431
10432 /* Fetch the dword the lazy way. */
10433 uint32_t const *pu32Src;
10434 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10435 if (rc == VINF_SUCCESS)
10436 {
10437 *pu32Value = *pu32Src;
10438 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10439
10440 /* Commit the new RSP value. */
10441 if (rc == VINF_SUCCESS)
10442 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10443 }
10444
10445 return rc;
10446}
10447
10448
10449/**
10450 * Pops a qword from the stack.
10451 *
10452 * @returns Strict VBox status code.
10453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10454 * @param pu64Value Where to store the popped value.
10455 */
10456IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
10457{
10458 /* Increment the stack pointer. */
10459 uint64_t uNewRsp;
10460 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10461
10462 /* Fetch the qword the lazy way. */
10463 uint64_t const *pu64Src;
10464 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10465 if (rc == VINF_SUCCESS)
10466 {
10467 *pu64Value = *pu64Src;
10468 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10469
10470 /* Commit the new RSP value. */
10471 if (rc == VINF_SUCCESS)
10472 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10473 }
10474
10475 return rc;
10476}
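/*
 * Illustrative only (not part of the original source): a hypothetical caller
 * pairing the push and pop helpers above; every strict status code must be
 * propagated so that \#SS/\#PF raised during the stack access reaches the
 * instruction dispatcher.
 *
 * @code
 *      uint16_t const u16Value = 0x1234;       // example payload only
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16Value);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      uint16_t u16Popped;
 *      rcStrict = iemMemStackPopU16(pVCpu, &u16Popped);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 * @endcode
 */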
10477
10478
10479/**
10480 * Pushes a word onto the stack, using a temporary stack pointer.
10481 *
10482 * @returns Strict VBox status code.
10483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10484 * @param u16Value The value to push.
10485 * @param pTmpRsp Pointer to the temporary stack pointer.
10486 */
10487IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10488{
10489 /* Decrement the stack pointer. */
10490 RTUINT64U NewRsp = *pTmpRsp;
10491 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10492
10493 /* Write the word the lazy way. */
10494 uint16_t *pu16Dst;
10495 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10496 if (rc == VINF_SUCCESS)
10497 {
10498 *pu16Dst = u16Value;
10499 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10500 }
10501
10502 /* Commit the new RSP value unless an access handler made trouble. */
10503 if (rc == VINF_SUCCESS)
10504 *pTmpRsp = NewRsp;
10505
10506 return rc;
10507}
10508
10509
10510/**
10511 * Pushes a dword onto the stack, using a temporary stack pointer.
10512 *
10513 * @returns Strict VBox status code.
10514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10515 * @param u32Value The value to push.
10516 * @param pTmpRsp Pointer to the temporary stack pointer.
10517 */
10518IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10519{
10520 /* Decrement the stack pointer. */
10521 RTUINT64U NewRsp = *pTmpRsp;
10522 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10523
10524 /* Write the dword the lazy way. */
10525 uint32_t *pu32Dst;
10526 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10527 if (rc == VINF_SUCCESS)
10528 {
10529 *pu32Dst = u32Value;
10530 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10531 }
10532
10533 /* Commit the new RSP value unless an access handler made trouble. */
10534 if (rc == VINF_SUCCESS)
10535 *pTmpRsp = NewRsp;
10536
10537 return rc;
10538}
10539
10540
10541/**
10542 * Pushes a qword onto the stack, using a temporary stack pointer.
10543 *
10544 * @returns Strict VBox status code.
10545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10546 * @param u64Value The value to push.
10547 * @param pTmpRsp Pointer to the temporary stack pointer.
10548 */
10549IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10550{
10551 /* Decrement the stack pointer. */
10552 RTUINT64U NewRsp = *pTmpRsp;
10553 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10554
10555 /* Write the qword the lazy way. */
10556 uint64_t *pu64Dst;
10557 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10558 if (rc == VINF_SUCCESS)
10559 {
10560 *pu64Dst = u64Value;
10561 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10562 }
10563
10564 /* Commit the new RSP value unless an access handler made trouble. */
10565 if (rc == VINF_SUCCESS)
10566 *pTmpRsp = NewRsp;
10567
10568 return rc;
10569}
10570
10571
10572/**
10573 * Pops a word from the stack, using a temporary stack pointer.
10574 *
10575 * @returns Strict VBox status code.
10576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10577 * @param pu16Value Where to store the popped value.
10578 * @param pTmpRsp Pointer to the temporary stack pointer.
10579 */
10580IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10581{
10582 /* Increment the stack pointer. */
10583 RTUINT64U NewRsp = *pTmpRsp;
10584 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10585
10586 /* Fetch the word the lazy way. */
10587 uint16_t const *pu16Src;
10588 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10589 if (rc == VINF_SUCCESS)
10590 {
10591 *pu16Value = *pu16Src;
10592 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10593
10594 /* Commit the new RSP value. */
10595 if (rc == VINF_SUCCESS)
10596 *pTmpRsp = NewRsp;
10597 }
10598
10599 return rc;
10600}
10601
10602
10603/**
10604 * Pops a dword from the stack, using a temporary stack pointer.
10605 *
10606 * @returns Strict VBox status code.
10607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10608 * @param pu32Value Where to store the popped value.
10609 * @param pTmpRsp Pointer to the temporary stack pointer.
10610 */
10611IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10612{
10613 /* Increment the stack pointer. */
10614 RTUINT64U NewRsp = *pTmpRsp;
10615 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10616
10617 /* Fetch the dword the lazy way. */
10618 uint32_t const *pu32Src;
10619 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10620 if (rc == VINF_SUCCESS)
10621 {
10622 *pu32Value = *pu32Src;
10623 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10624
10625 /* Commit the new RSP value. */
10626 if (rc == VINF_SUCCESS)
10627 *pTmpRsp = NewRsp;
10628 }
10629
10630 return rc;
10631}
10632
10633
10634/**
10635 * Pops a qword from the stack, using a temporary stack pointer.
10636 *
10637 * @returns Strict VBox status code.
10638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10639 * @param pu64Value Where to store the popped value.
10640 * @param pTmpRsp Pointer to the temporary stack pointer.
10641 */
10642IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10643{
10644 /* Increment the stack pointer. */
10645 RTUINT64U NewRsp = *pTmpRsp;
10646 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10647
10648 /* Fetch the qword the lazy way. */
10649 uint64_t const *pu64Src;
10650 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10651 if (rcStrict == VINF_SUCCESS)
10652 {
10653 *pu64Value = *pu64Src;
10654 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10655
10656 /* Commit the new RSP value. */
10657 if (rcStrict == VINF_SUCCESS)
10658 *pTmpRsp = NewRsp;
10659 }
10660
10661 return rcStrict;
10662}
10663
10664
10665/**
10666 * Begin a special stack push (used by interrupt, exceptions and such).
10667 *
10668 * This will raise \#SS or \#PF if appropriate.
10669 *
10670 * @returns Strict VBox status code.
10671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10672 * @param cbMem The number of bytes to push onto the stack.
10673 * @param ppvMem Where to return the pointer to the stack memory.
10674 * As with the other memory functions this could be
10675 * direct access or bounce buffered access, so
10676 * don't commit the register until the commit call
10677 * succeeds.
10678 * @param puNewRsp Where to return the new RSP value. This must be
10679 * passed unchanged to
10680 * iemMemStackPushCommitSpecial().
10681 */
10682IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10683{
10684 Assert(cbMem < UINT8_MAX);
10685 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10686 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10687}
10688
10689
10690/**
10691 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10692 *
10693 * This will update the rSP.
10694 *
10695 * @returns Strict VBox status code.
10696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10697 * @param pvMem The pointer returned by
10698 * iemMemStackPushBeginSpecial().
10699 * @param uNewRsp The new RSP value returned by
10700 * iemMemStackPushBeginSpecial().
10701 */
10702IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
10703{
10704 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10705 if (rcStrict == VINF_SUCCESS)
10706 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10707 return rcStrict;
10708}
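/*
 * Illustrative only (not part of the original source): the begin/commit pairing
 * for a special stack push, roughly as an exception frame writer would use it.
 * The frame contents below are merely plausible values; what actually goes into
 * the buffer is entirely up to the caller.
 *
 * @code
 *      uint64_t  uNewRsp;
 *      uint32_t *pau32Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
 *                                                          (void **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pau32Frame[0] = (uint32_t)pVCpu->cpum.GstCtx.rip;
 *      pau32Frame[1] = pVCpu->cpum.GstCtx.cs.Sel;
 *      pau32Frame[2] = pVCpu->cpum.GstCtx.eflags.u;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
 * @endcode
 */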
10709
10710
10711/**
10712 * Begin a special stack pop (used by iret, retf and such).
10713 *
10714 * This will raise \#SS or \#PF if appropriate.
10715 *
10716 * @returns Strict VBox status code.
10717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10718 * @param cbMem The number of bytes to pop from the stack.
10719 * @param ppvMem Where to return the pointer to the stack memory.
10720 * @param puNewRsp Where to return the new RSP value. This must be
10721 * assigned to CPUMCTX::rsp manually some time
10722 * after iemMemStackPopDoneSpecial() has been
10723 * called.
10724 */
10725IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10726{
10727 Assert(cbMem < UINT8_MAX);
10728 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10729 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10730}
10731
10732
10733/**
10734 * Continue a special stack pop (used by iret and retf).
10735 *
10736 * This will raise \#SS or \#PF if appropriate.
10737 *
10738 * @returns Strict VBox status code.
10739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10740 * @param cbMem The number of bytes to pop from the stack.
10741 * @param ppvMem Where to return the pointer to the stack memory.
10742 * @param puNewRsp Where to return the new RSP value. This must be
10743 * assigned to CPUMCTX::rsp manually some time
10744 * after iemMemStackPopDoneSpecial() has been
10745 * called.
10746 */
10747IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10748{
10749 Assert(cbMem < UINT8_MAX);
10750 RTUINT64U NewRsp;
10751 NewRsp.u = *puNewRsp;
10752 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10753 *puNewRsp = NewRsp.u;
10754 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10755}
10756
10757
10758/**
10759 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10760 * iemMemStackPopContinueSpecial).
10761 *
10762 * The caller will manually commit the rSP.
10763 *
10764 * @returns Strict VBox status code.
10765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10766 * @param pvMem The pointer returned by
10767 * iemMemStackPopBeginSpecial() or
10768 * iemMemStackPopContinueSpecial().
10769 */
10770IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
10771{
10772 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10773}
10774
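/*
 * Illustrative sketch (not part of the original source): the begin/done pop
 * flow, e.g. for a simplified 64-bit near return.  The function name and the
 * frame layout are made up; larger frames can be read in several rounds via
 * iemMemStackPopContinueSpecial().
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePopSpecial(PVMCPUCC pVCpu, uint64_t *puPoppedValue)
{
    uint64_t        uNewRsp;
    uint64_t const *pu64Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pu64Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puPoppedValue = pu64Frame[0];        /* Read while the frame is mapped. */
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp; /* The caller commits RSP manually. */
    return rcStrict;
}
#endif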
10775
10776/**
10777 * Fetches a system table byte.
10778 *
10779 * @returns Strict VBox status code.
10780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10781 * @param pbDst Where to return the byte.
10782 * @param iSegReg The index of the segment register to use for
10783 * this access. The base and limits are checked.
10784 * @param GCPtrMem The address of the guest memory.
10785 */
10786IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10787{
10788 /* The lazy approach for now... */
10789 uint8_t const *pbSrc;
10790 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10791 if (rc == VINF_SUCCESS)
10792 {
10793 *pbDst = *pbSrc;
10794 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10795 }
10796 return rc;
10797}
10798
10799
10800/**
10801 * Fetches a system table word.
10802 *
10803 * @returns Strict VBox status code.
10804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10805 * @param pu16Dst Where to return the word.
10806 * @param iSegReg The index of the segment register to use for
10807 * this access. The base and limits are checked.
10808 * @param GCPtrMem The address of the guest memory.
10809 */
10810IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10811{
10812 /* The lazy approach for now... */
10813 uint16_t const *pu16Src;
10814 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10815 if (rc == VINF_SUCCESS)
10816 {
10817 *pu16Dst = *pu16Src;
10818 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10819 }
10820 return rc;
10821}
10822
10823
10824/**
10825 * Fetches a system table dword.
10826 *
10827 * @returns Strict VBox status code.
10828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10829 * @param pu32Dst Where to return the dword.
10830 * @param iSegReg The index of the segment register to use for
10831 * this access. The base and limits are checked.
10832 * @param GCPtrMem The address of the guest memory.
10833 */
10834IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10835{
10836 /* The lazy approach for now... */
10837 uint32_t const *pu32Src;
10838 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10839 if (rc == VINF_SUCCESS)
10840 {
10841 *pu32Dst = *pu32Src;
10842 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10843 }
10844 return rc;
10845}
10846
10847
10848/**
10849 * Fetches a system table qword.
10850 *
10851 * @returns Strict VBox status code.
10852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10853 * @param pu64Dst Where to return the qword.
10854 * @param iSegReg The index of the segment register to use for
10855 * this access. The base and limits are checked.
10856 * @param GCPtrMem The address of the guest memory.
10857 */
10858IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10859{
10860 /* The lazy approach for now... */
10861 uint64_t const *pu64Src;
10862 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10863 if (rc == VINF_SUCCESS)
10864 {
10865 *pu64Dst = *pu64Src;
10866 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10867 }
10868 return rc;
10869}
10870
10871
10872/**
10873 * Fetches a descriptor table entry with caller specified error code.
10874 *
10875 * @returns Strict VBox status code.
10876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10877 * @param pDesc Where to return the descriptor table entry.
10878 * @param uSel The selector which table entry to fetch.
10879 * @param uXcpt The exception to raise on table lookup error.
10880 * @param uErrorCode The error code associated with the exception.
10881 */
10882IEM_STATIC VBOXSTRICTRC
10883iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10884{
10885 AssertPtr(pDesc);
10886 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10887
10888 /** @todo did the 286 require all 8 bytes to be accessible? */
10889 /*
10890 * Get the selector table base and check bounds.
10891 */
10892 RTGCPTR GCPtrBase;
10893 if (uSel & X86_SEL_LDT)
10894 {
10895 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10896 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10897 {
10898 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10899 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10900 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10901 uErrorCode, 0);
10902 }
10903
10904 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10905 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10906 }
10907 else
10908 {
10909 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10910 {
10911 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10912 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10913 uErrorCode, 0);
10914 }
10915 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10916 }
10917
10918 /*
10919 * Read the legacy descriptor and maybe the long mode extensions if
10920 * required.
10921 */
10922 VBOXSTRICTRC rcStrict;
10923 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10924 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10925 else
10926 {
10927 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10928 if (rcStrict == VINF_SUCCESS)
10929 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10930 if (rcStrict == VINF_SUCCESS)
10931 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10932 if (rcStrict == VINF_SUCCESS)
10933 pDesc->Legacy.au16[3] = 0;
10934 else
10935 return rcStrict;
10936 }
10937
10938 if (rcStrict == VINF_SUCCESS)
10939 {
10940 if ( !IEM_IS_LONG_MODE(pVCpu)
10941 || pDesc->Legacy.Gen.u1DescType)
10942 pDesc->Long.au64[1] = 0;
10943 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10944 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10945 else
10946 {
10947 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10948 /** @todo is this the right exception? */
10949 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10950 }
10951 }
10952 return rcStrict;
10953}
10954
10955
10956/**
10957 * Fetches a descriptor table entry.
10958 *
10959 * @returns Strict VBox status code.
10960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10961 * @param pDesc Where to return the descriptor table entry.
10962 * @param uSel The selector which table entry to fetch.
10963 * @param uXcpt The exception to raise on table lookup error.
10964 */
10965IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10966{
10967 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10968}
10969
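/*
 * Illustrative sketch (not part of the original source): fetching the
 * descriptor for a selector and checking it.  The selector value is made up
 * and the not-present handling is only hinted at.
 */
#if 0 /* example only */
    IEMSELDESC     Desc;
    uint16_t const uSel = 0x0028;                   /* made-up selector */
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                            /* #GP(sel) / #PF already raised on bad table limits. */
    if (!Desc.Legacy.Gen.u1Present)
    {
        /* ...raise #NP or whatever the instruction calls for... */
    }
#endif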
10970
10971/**
10972 * Fakes a long mode stack segment descriptor for SS = 0.
10973 *
10974 * @param pDescSs Where to return the fake stack descriptor.
10975 * @param uDpl The DPL we want.
10976 */
10977IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10978{
10979 pDescSs->Long.au64[0] = 0;
10980 pDescSs->Long.au64[1] = 0;
10981 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10982 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10983 pDescSs->Long.Gen.u2Dpl = uDpl;
10984 pDescSs->Long.Gen.u1Present = 1;
10985 pDescSs->Long.Gen.u1Long = 1;
10986}
10987
10988
10989/**
10990 * Marks the selector descriptor as accessed (only non-system descriptors).
10991 *
10992 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10993 * will therefore skip the limit checks.
10994 *
10995 * @returns Strict VBox status code.
10996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10997 * @param uSel The selector.
10998 */
10999IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
11000{
11001 /*
11002 * Get the selector table base and calculate the entry address.
11003 */
11004 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11005 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11006 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11007 GCPtr += uSel & X86_SEL_MASK;
11008
11009 /*
11010 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11011 * ugly stuff to avoid this.  This will make sure it's an atomic access
11012 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11013 */
11014 VBOXSTRICTRC rcStrict;
11015 uint32_t volatile *pu32;
11016 if ((GCPtr & 3) == 0)
11017 {
11018        /* The normal case, map the 32 bits surrounding the accessed bit (bit 40). */
11019 GCPtr += 2 + 2;
11020 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11021 if (rcStrict != VINF_SUCCESS)
11022 return rcStrict;
11023        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11024 }
11025 else
11026 {
11027 /* The misaligned GDT/LDT case, map the whole thing. */
11028 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11029 if (rcStrict != VINF_SUCCESS)
11030 return rcStrict;
11031 switch ((uintptr_t)pu32 & 3)
11032 {
11033 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11034 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11035 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11036 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11037 }
11038 }
11039
11040 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11041}
11042
11043/** @} */
11044
11045
11046/*
11047 * Include the C/C++ implementation of instructions.
11048 */
11049#include "IEMAllCImpl.cpp.h"
11050
11051
11052
11053/** @name "Microcode" macros.
11054 *
11055 * The idea is that we should be able to use the same code to interpret
11056 * instructions as well as to feed a recompiler. Thus this obfuscation.
11057 *
11058 * @{
11059 */
11060#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11061#define IEM_MC_END() }
11062#define IEM_MC_PAUSE() do {} while (0)
11063#define IEM_MC_CONTINUE() do {} while (0)
11064
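/*
 * Illustrative sketch (not part of the original source): roughly how an
 * instruction body is written with these wrappers.  The register choice and
 * the 'mov rdx, rax'-like operation are made up; in the real decoder such a
 * block sits inside an opcode handler function.
 */
#if 0 /* example only */
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint64_t, u64Value);
    IEM_MC_FETCH_GREG_U64(u64Value, X86_GREG_xAX);  /* read RAX */
    IEM_MC_STORE_GREG_U64(X86_GREG_xDX, u64Value);  /* write RDX */
    IEM_MC_ADVANCE_RIP();                           /* step past the instruction */
    IEM_MC_END();
    return VINF_SUCCESS;
#endif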
11065/** Internal macro. */
11066#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11067 do \
11068 { \
11069 VBOXSTRICTRC rcStrict2 = a_Expr; \
11070 if (rcStrict2 != VINF_SUCCESS) \
11071 return rcStrict2; \
11072 } while (0)
11073
11074
11075#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11076#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11077#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11078#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11079#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11080#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11081#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11082#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11083#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11084 do { \
11085 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11086 return iemRaiseDeviceNotAvailable(pVCpu); \
11087 } while (0)
11088#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11089 do { \
11090 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11091 return iemRaiseDeviceNotAvailable(pVCpu); \
11092 } while (0)
11093#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11094 do { \
11095 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11096 return iemRaiseMathFault(pVCpu); \
11097 } while (0)
11098#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11099 do { \
11100 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11101 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11102 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11103 return iemRaiseUndefinedOpcode(pVCpu); \
11104 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11105 return iemRaiseDeviceNotAvailable(pVCpu); \
11106 } while (0)
11107#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11108 do { \
11109 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11110 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11111 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11112 return iemRaiseUndefinedOpcode(pVCpu); \
11113 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11114 return iemRaiseDeviceNotAvailable(pVCpu); \
11115 } while (0)
11116#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11117 do { \
11118 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11119 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11120 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11121 return iemRaiseUndefinedOpcode(pVCpu); \
11122 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11123 return iemRaiseDeviceNotAvailable(pVCpu); \
11124 } while (0)
11125#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11126 do { \
11127 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11128 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11129 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11130 return iemRaiseUndefinedOpcode(pVCpu); \
11131 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11132 return iemRaiseDeviceNotAvailable(pVCpu); \
11133 } while (0)
11134#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11135 do { \
11136 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11137 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11138 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11139 return iemRaiseUndefinedOpcode(pVCpu); \
11140 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11141 return iemRaiseDeviceNotAvailable(pVCpu); \
11142 } while (0)
11143#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11144 do { \
11145 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11146 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11147 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11148 return iemRaiseUndefinedOpcode(pVCpu); \
11149 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11150 return iemRaiseDeviceNotAvailable(pVCpu); \
11151 } while (0)
11152#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11153 do { \
11154 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11155 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11156 return iemRaiseUndefinedOpcode(pVCpu); \
11157 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11158 return iemRaiseDeviceNotAvailable(pVCpu); \
11159 } while (0)
11160#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11161 do { \
11162 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11163 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11164 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11165 return iemRaiseUndefinedOpcode(pVCpu); \
11166 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11167 return iemRaiseDeviceNotAvailable(pVCpu); \
11168 } while (0)
11169#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11170 do { \
11171 if (pVCpu->iem.s.uCpl != 0) \
11172 return iemRaiseGeneralProtectionFault0(pVCpu); \
11173 } while (0)
11174#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11175 do { \
11176 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11177 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11178 } while (0)
11179#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11180 do { \
11181 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11182 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11183 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11184 return iemRaiseUndefinedOpcode(pVCpu); \
11185 } while (0)
11186#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11187 do { \
11188 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11189 return iemRaiseGeneralProtectionFault0(pVCpu); \
11190 } while (0)
11191
11192
11193#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11194#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11195#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11196#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11197#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11198#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11199#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11200 uint32_t a_Name; \
11201 uint32_t *a_pName = &a_Name
11202#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11203 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11204
11205#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11206#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11207
11208#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11209#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11210#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11211#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11212#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11213#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11214#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11215#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11216#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11217#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11218#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11219#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11220#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11221#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11222#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11223#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11224#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11225#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11226 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11227 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11228 } while (0)
11229#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11230 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11231 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11232 } while (0)
11233#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11234 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11235 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11236 } while (0)
11237/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11238#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11239 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11240 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11241 } while (0)
11242#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11243 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11244 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11245 } while (0)
11246/** @note Not for IOPL or IF testing or modification. */
11247#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11248#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11249#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11250#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11251
11252#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11253#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11254#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11255#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11256#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11257#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11258#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11259#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11260#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11261#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11262/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11263#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11264 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11265 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11266 } while (0)
11267#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11268 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11269 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11270 } while (0)
11271#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11272 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11273
11274
11275#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11276#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11277/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11278 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11279#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11280#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11281/** @note Not for IOPL or IF testing or modification. */
11282#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11283
11284#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11285#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11286#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11287 do { \
11288 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11289 *pu32Reg += (a_u32Value); \
11290        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11291 } while (0)
11292#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11293
11294#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11295#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11296#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11297 do { \
11298 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11299 *pu32Reg -= (a_u32Value); \
11300        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11301 } while (0)
11302#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11303#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11304
11305#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11306#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11307#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11308#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11309#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11310#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11311#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11312
11313#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11314#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11315#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11316#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11317
11318#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11319#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11320#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11321
11322#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11323#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11324#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11325
11326#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11327#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11328#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11329
11330#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11331#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11332#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11333
11334#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11335
11336#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11337
11338#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11339#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11340#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11341 do { \
11342 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11343 *pu32Reg &= (a_u32Value); \
11344        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11345 } while (0)
11346#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11347
11348#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11349#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11350#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11351 do { \
11352 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11353 *pu32Reg |= (a_u32Value); \
11354        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11355 } while (0)
11356#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11357
11358
11359/** @note Not for IOPL or IF modification. */
11360#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11361/** @note Not for IOPL or IF modification. */
11362#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11363/** @note Not for IOPL or IF modification. */
11364#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11365
11366#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11367
11368/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11369#define IEM_MC_FPU_TO_MMX_MODE() do { \
11370 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11371 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11372 } while (0)
11373
11374/** Switches the FPU state from MMX mode (FTW=0xffff). */
11375#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11376 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11377 } while (0)
11378
11379#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11380 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11381#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11382 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11383#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11384 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11385 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11386 } while (0)
11387#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11388 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11389 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11390 } while (0)
11391#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11392 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11393#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11394 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11395#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11396 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11397
11398#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11399 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11400 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11401 } while (0)
11402#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11403 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11404#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11405 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11406#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11407 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11408#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11409 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11410 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11411 } while (0)
11412#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11413 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11414#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11415 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11416 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11417 } while (0)
11418#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11419 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11420#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11421 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11422 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11423 } while (0)
11424#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11425 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11426#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11427 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11428#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11429 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11430#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11431 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11432#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11433 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11434 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11435 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11436 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11437 } while (0)
11438
11439#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11440 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11441 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11442 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11443 } while (0)
11444#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11445 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11446 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11447 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11448 } while (0)
11449#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11450 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11451 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11452 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11453 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11454 } while (0)
11455#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11456 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11457 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11458 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11459 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11460 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11461 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11462 } while (0)
11463
11464#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11465#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11466 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11467 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11468 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11469 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11470 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11471 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11472 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11473 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11474 } while (0)
11475#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11476 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11477 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11478 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11479 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11480 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11481 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11482 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11483 } while (0)
11484#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11485 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11486 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11487 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11488 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11489 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11490 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11491 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11492 } while (0)
11493#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11494 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11495 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11496 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11497 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11498 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11499 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11500 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11501 } while (0)
11502
11503#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11504 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11505#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11506 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11507#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11508 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11509#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11510 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11511 uintptr_t const iYRegTmp = (a_iYReg); \
11512 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11513 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11514 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11515 } while (0)
11516
11517#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11518 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11519 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11520 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11521 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11522 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11523 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11524 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11525 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11526 } while (0)
11527#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11528 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11529 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11530 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11531 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11532 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11533 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11534 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11535 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11536 } while (0)
11537#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11538 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11539 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11540 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11541 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11542 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11543 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11544 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11545 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11546 } while (0)
11547
11548#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11549 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11550 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11551 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11552 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11553 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11554 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11555 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11556 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11557 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11558 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11559 } while (0)
11560#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11561 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11562 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11563 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11564 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11565 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11567 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11568 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11569 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11570 } while (0)
11571#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11572 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11573 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11574 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11575 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11576 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11577 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11578 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11579 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11580 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11581 } while (0)
11582#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11583 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11584 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11585 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11586 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11587 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11588 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11589 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11590 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11591 } while (0)
11592
11593#ifndef IEM_WITH_SETJMP
11594# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11595 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11596# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11597 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11598# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11599 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11600#else
11601# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11602 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11603# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11604 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11605# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11606 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11607#endif
11608
11609#ifndef IEM_WITH_SETJMP
11610# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11611 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11612# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11613 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11614# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11615 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11616#else
11617# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11618 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11619# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11620 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11621# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11622 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11623#endif
11624
11625#ifndef IEM_WITH_SETJMP
11626# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11627 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11628# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11629 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11630# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11631 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11632#else
11633# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11634 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11635# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11636 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11637# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11638 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11639#endif
11640
11641#ifdef SOME_UNUSED_FUNCTION
11642# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11643 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11644#endif
11645
11646#ifndef IEM_WITH_SETJMP
11647# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11649# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11650 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11651# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11652 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11653# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11655#else
11656# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11657 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11658# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11659 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11660# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11661 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11662# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11663 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11664#endif
11665
11666#ifndef IEM_WITH_SETJMP
11667# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11669# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11670 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11671# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11673#else
11674# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11675 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11676# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11677 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11678# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11679 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11680#endif
11681
11682#ifndef IEM_WITH_SETJMP
11683# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11685# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11687#else
11688# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11689 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11690# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11691 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11692#endif
11693
11694#ifndef IEM_WITH_SETJMP
11695# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11696 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11697# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11699#else
11700# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11701 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11702# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11703 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11704#endif
11705
11706
11707
11708#ifndef IEM_WITH_SETJMP
11709# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11710 do { \
11711 uint8_t u8Tmp; \
11712 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11713 (a_u16Dst) = u8Tmp; \
11714 } while (0)
11715# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11716 do { \
11717 uint8_t u8Tmp; \
11718 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11719 (a_u32Dst) = u8Tmp; \
11720 } while (0)
11721# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11722 do { \
11723 uint8_t u8Tmp; \
11724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11725 (a_u64Dst) = u8Tmp; \
11726 } while (0)
11727# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11728 do { \
11729 uint16_t u16Tmp; \
11730 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11731 (a_u32Dst) = u16Tmp; \
11732 } while (0)
11733# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11734 do { \
11735 uint16_t u16Tmp; \
11736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11737 (a_u64Dst) = u16Tmp; \
11738 } while (0)
11739# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11740 do { \
11741 uint32_t u32Tmp; \
11742 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11743 (a_u64Dst) = u32Tmp; \
11744 } while (0)
11745#else /* IEM_WITH_SETJMP */
11746# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11747 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11748# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11749 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11750# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11751 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11752# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11753 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11754# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11755 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11756# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11757 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11758#endif /* IEM_WITH_SETJMP */
11759
11760#ifndef IEM_WITH_SETJMP
11761# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11762 do { \
11763 uint8_t u8Tmp; \
11764 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11765 (a_u16Dst) = (int8_t)u8Tmp; \
11766 } while (0)
11767# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11768 do { \
11769 uint8_t u8Tmp; \
11770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11771 (a_u32Dst) = (int8_t)u8Tmp; \
11772 } while (0)
11773# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11774 do { \
11775 uint8_t u8Tmp; \
11776 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11777 (a_u64Dst) = (int8_t)u8Tmp; \
11778 } while (0)
11779# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11780 do { \
11781 uint16_t u16Tmp; \
11782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11783 (a_u32Dst) = (int16_t)u16Tmp; \
11784 } while (0)
11785# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11786 do { \
11787 uint16_t u16Tmp; \
11788 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11789 (a_u64Dst) = (int16_t)u16Tmp; \
11790 } while (0)
11791# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11792 do { \
11793 uint32_t u32Tmp; \
11794 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11795 (a_u64Dst) = (int32_t)u32Tmp; \
11796 } while (0)
11797#else /* IEM_WITH_SETJMP */
11798# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11799 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11800# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11801 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11802# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11803 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11804# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11805 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11806# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11807 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11808# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11809 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11810#endif /* IEM_WITH_SETJMP */
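
/* Illustrative usage sketch, not lifted verbatim from the instruction tables:
 * a MOVZX r32, r/m8 style memory form combines the effective address
 * calculation with a zero-extending fetch roughly like this, assuming the
 * IEM_MC_BEGIN/IEM_MC_LOCAL/IEM_MC_STORE_GREG_U32 and related microcode
 * macros defined elsewhere in this file:
 * @code
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 * The IEM_MC_FETCH_MEM_*_SX_* variants above follow the same pattern, only
 * sign-extending the fetched value (MOVSX style). */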
11811
11812#ifndef IEM_WITH_SETJMP
11813# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11814 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11815# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11816 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11817# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11818 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11819# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11820 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11821#else
11822# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11823 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11824# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11825 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11826# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11827 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11828# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11829 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11830#endif
11831
11832#ifndef IEM_WITH_SETJMP
11833# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11834 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11835# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11836 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11837# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11838 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11839# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11840 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11841#else
11842# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11843 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11844# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11845 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11846# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11847 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11848# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11849 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11850#endif
11851
11852#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11853#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11854#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11855#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11856#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11857#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11858#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11859 do { \
11860 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11861 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11862 } while (0)
11863
11864#ifndef IEM_WITH_SETJMP
11865# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11866 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11867# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11868 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11869#else
11870# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11871 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11872# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11873 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11874#endif
11875
11876#ifndef IEM_WITH_SETJMP
11877# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11878 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11879# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11880 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11881#else
11882# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11883 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11884# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11885 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11886#endif
11887
11888
11889#define IEM_MC_PUSH_U16(a_u16Value) \
11890 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11891#define IEM_MC_PUSH_U32(a_u32Value) \
11892 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11893#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11894 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11895#define IEM_MC_PUSH_U64(a_u64Value) \
11896 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11897
11898#define IEM_MC_POP_U16(a_pu16Value) \
11899 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11900#define IEM_MC_POP_U32(a_pu32Value) \
11901 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11902#define IEM_MC_POP_U64(a_pu64Value) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
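
/* Illustrative sketch (assumed shape): a 16-bit PUSH of a general register
 * fetches the register into a local and hands it to the stack helper via
 * IEM_MC_PUSH_U16, with iReg standing in for the decoded register index:
 * @code
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, iReg);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 * The pop macros take a pointer to a local instead, since the helpers write
 * the popped value back through it. */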
11904
11905/** Maps guest memory for direct or bounce buffered access.
11906 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11907 * @remarks May return.
11908 */
11909#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11910 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11911
11912/** Maps guest memory for direct or bounce buffered access.
11913 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11914 * @remarks May return.
11915 */
11916#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11917 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11918
11919/** Commits the memory and unmaps the guest memory.
11920 * @remarks May return.
11921 */
11922#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11923 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11924
11925/** Commits the memory and unmaps the guest memory, unless the FPU status word
11926 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11927 * that would prevent the store.
11928 *
11929 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11930 * store, while \#P will not.
11931 *
11932 * @remarks May in theory return - for now.
11933 */
11934#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11935 do { \
11936 if ( !(a_u16FSW & X86_FSW_ES) \
11937 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11938 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11939 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11940 } while (0)
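
/* Illustrative read-modify-write sketch (assumptions: the IEM_MC_ARG /
 * IEM_MC_ARG_LOCAL_EFLAGS helpers defined earlier in this file and the
 * iemAImpl_add_u32 worker; lock-prefix handling omitted): a memory
 * destination operand is mapped, handed to the ALU worker, then committed
 * and unmapped:
 * @code
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint32_t *,          pu32Dst,            0);
 *      IEM_MC_ARG(uint32_t,            u32Src,             1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(        pEFlags, EFlags,    2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode */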
11941
11942/** Calculate effective address from R/M. */
11943#ifndef IEM_WITH_SETJMP
11944# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11945 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11946#else
11947# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11948 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11949#endif
11950
11951#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11952#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11953#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11954#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11955#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11956#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11957#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
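
/* Illustrative sketch (assumed shape, not a verbatim copy of the group 3
 * decoder): the value-returning IEM_MC_CALL_AIMPL_3/4 forms are used where
 * the worker reports a status, e.g. the division workers which return
 * non-zero on a divide error so the decoder can raise \#DE.  Here pu16AX,
 * pu16DX, u16Value and pEFlags stand for IEM_MC_ARG style references set up
 * earlier in the block:
 * @code
 *      IEM_MC_LOCAL(int32_t, rc);
 *      IEM_MC_CALL_AIMPL_4(rc, iemAImpl_div_u16, pu16AX, pu16DX, u16Value, pEFlags);
 *      IEM_MC_IF_LOCAL_IS_Z(rc) {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ELSE() {
 *          IEM_MC_RAISE_DIVIDE_ERROR();
 *      } IEM_MC_ENDIF();
 * @endcode */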
11958
11959/**
11960 * Defers the rest of the instruction emulation to a C implementation routine
11961 * and returns, only taking the standard parameters.
11962 *
11963 * @param a_pfnCImpl The pointer to the C routine.
11964 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11965 */
11966#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11967
11968/**
11969 * Defers the rest of instruction emulation to a C implementation routine and
11970 * returns, taking one argument in addition to the standard ones.
11971 *
11972 * @param a_pfnCImpl The pointer to the C routine.
11973 * @param a0 The argument.
11974 */
11975#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11976
11977/**
11978 * Defers the rest of the instruction emulation to a C implementation routine
11979 * and returns, taking two arguments in addition to the standard ones.
11980 *
11981 * @param a_pfnCImpl The pointer to the C routine.
11982 * @param a0 The first extra argument.
11983 * @param a1 The second extra argument.
11984 */
11985#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11986
11987/**
11988 * Defers the rest of the instruction emulation to a C implementation routine
11989 * and returns, taking three arguments in addition to the standard ones.
11990 *
11991 * @param a_pfnCImpl The pointer to the C routine.
11992 * @param a0 The first extra argument.
11993 * @param a1 The second extra argument.
11994 * @param a2 The third extra argument.
11995 */
11996#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11997
11998/**
11999 * Defers the rest of the instruction emulation to a C implementation routine
12000 * and returns, taking four arguments in addition to the standard ones.
12001 *
12002 * @param a_pfnCImpl The pointer to the C routine.
12003 * @param a0 The first extra argument.
12004 * @param a1 The second extra argument.
12005 * @param a2 The third extra argument.
12006 * @param a3 The fourth extra argument.
12007 */
12008#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12009
12010/**
12011 * Defers the rest of the instruction emulation to a C implementation routine
12012 * and returns, taking five arguments in addition to the standard ones.
12013 *
12014 * @param a_pfnCImpl The pointer to the C routine.
12015 * @param a0 The first extra argument.
12016 * @param a1 The second extra argument.
12017 * @param a2 The third extra argument.
12018 * @param a3 The fourth extra argument.
12019 * @param a4 The fifth extra argument.
12020 */
12021#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12022
12023/**
12024 * Defers the entire instruction emulation to a C implementation routine and
12025 * returns, only taking the standard parameters.
12026 *
12027 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12028 *
12029 * @param a_pfnCImpl The pointer to the C routine.
12030 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12031 */
12032#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12033
12034/**
12035 * Defers the entire instruction emulation to a C implementation routine and
12036 * returns, taking one argument in addition to the standard ones.
12037 *
12038 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12039 *
12040 * @param a_pfnCImpl The pointer to the C routine.
12041 * @param a0 The argument.
12042 */
12043#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12044
12045/**
12046 * Defers the entire instruction emulation to a C implementation routine and
12047 * returns, taking two arguments in addition to the standard ones.
12048 *
12049 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12050 *
12051 * @param a_pfnCImpl The pointer to the C routine.
12052 * @param a0 The first extra argument.
12053 * @param a1 The second extra argument.
12054 */
12055#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12056
12057/**
12058 * Defers the entire instruction emulation to a C implementation routine and
12059 * returns, taking three arguments in addition to the standard ones.
12060 *
12061 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12062 *
12063 * @param a_pfnCImpl The pointer to the C routine.
12064 * @param a0 The first extra argument.
12065 * @param a1 The second extra argument.
12066 * @param a2 The third extra argument.
12067 */
12068#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
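
/* Illustrative sketch: a simple instruction whose entire emulation lives in
 * a C helper defers to it straight from the decoder function, without any
 * IEM_MC block.  Assuming the FNIEMOP_DEF decoder convention used by the
 * instruction tables (iemOp_example / iemCImpl_example are made-up names):
 * @code
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example);
 *      }
 * @endcode
 * The IEM_MC_CALL_CIMPL_* forms above are used instead when operands have
 * already been decoded inside an IEM_MC block and must be passed along. */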
12069
12070/**
12071 * Calls a FPU assembly implementation taking one visible argument.
12072 *
12073 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12074 * @param a0 The first extra argument.
12075 */
12076#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12077 do { \
12078 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12079 } while (0)
12080
12081/**
12082 * Calls a FPU assembly implementation taking two visible arguments.
12083 *
12084 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12085 * @param a0 The first extra argument.
12086 * @param a1 The second extra argument.
12087 */
12088#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12089 do { \
12090 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12091 } while (0)
12092
12093/**
12094 * Calls a FPU assembly implementation taking three visible arguments.
12095 *
12096 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12097 * @param a0 The first extra argument.
12098 * @param a1 The second extra argument.
12099 * @param a2 The third extra argument.
12100 */
12101#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12102 do { \
12103 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12104 } while (0)
12105
12106#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12107 do { \
12108 (a_FpuData).FSW = (a_FSW); \
12109 (a_FpuData).r80Result = *(a_pr80Value); \
12110 } while (0)
12111
12112/** Pushes FPU result onto the stack. */
12113#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12114 iemFpuPushResult(pVCpu, &a_FpuData)
12115/** Pushes FPU result onto the stack and sets the FPUDP. */
12116#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12117 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12118
12119/** Replaces ST0 with value one and pushes value two onto the FPU stack. */
12120#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12121 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12122
12123/** Stores FPU result in a stack register. */
12124#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12125 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12126/** Stores FPU result in a stack register and pops the stack. */
12127#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12128 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12129/** Stores FPU result in a stack register and sets the FPUDP. */
12130#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12131 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12132/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12133 * stack. */
12134#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12135 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
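
/* Illustrative sketch of the usual x87 result flow (assumed shape, with
 * iemAImpl_fsqrt_r80 standing in for any single-operand FPU worker): prepare
 * the FPU state, reference ST(0), run the worker into a local IEMFPURESULT
 * and store it back with the macros above, falling back to the underflow
 * helper when the register is empty:
 * @code
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,       1);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) {
 *          IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fsqrt_r80, pFpuRes, pr80Value);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      } IEM_MC_ENDIF();
 * @endcode */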
12136
12137/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12138#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12139 iemFpuUpdateOpcodeAndIp(pVCpu)
12140/** Free a stack register (for FFREE and FFREEP). */
12141#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12142 iemFpuStackFree(pVCpu, a_iStReg)
12143/** Increment the FPU stack pointer. */
12144#define IEM_MC_FPU_STACK_INC_TOP() \
12145 iemFpuStackIncTop(pVCpu)
12146/** Decrement the FPU stack pointer. */
12147#define IEM_MC_FPU_STACK_DEC_TOP() \
12148 iemFpuStackDecTop(pVCpu)
12149
12150/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12151#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12152 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12153/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12154#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12155 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12156/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12157#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12158 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12159/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12160#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12161 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12162/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12163 * stack. */
12164#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12165 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12166/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12167#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12168 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12169
12170/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12171#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12172 iemFpuStackUnderflow(pVCpu, a_iStDst)
12173/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12174 * stack. */
12175#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12176 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12177/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12178 * FPUDS. */
12179#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12180 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12181/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12182 * FPUDS. Pops stack. */
12183#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12184 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12185/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12186 * stack twice. */
12187#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12188 iemFpuStackUnderflowThenPopPop(pVCpu)
12189/** Raises a FPU stack underflow exception for an instruction pushing a result
12190 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12191#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12192 iemFpuStackPushUnderflow(pVCpu)
12193/** Raises a FPU stack underflow exception for an instruction pushing a result
12194 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12195#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12196 iemFpuStackPushUnderflowTwo(pVCpu)
12197
12198/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12199 * FPUIP, FPUCS and FOP. */
12200#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12201 iemFpuStackPushOverflow(pVCpu)
12202/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12203 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12204#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12205 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12206/** Prepares for using the FPU state.
12207 * Ensures that we can use the host FPU in the current context (RC+R0).
12208 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12209#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12210/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12211#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12212/** Actualizes the guest FPU state so it can be accessed and modified. */
12213#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12214
12215/** Prepares for using the SSE state.
12216 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12217 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12218#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12219/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12220#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12221/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12222#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12223
12224/** Prepares for using the AVX state.
12225 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12226 * Ensures the guest AVX state in the CPUMCTX is up to date.
12227 * @note This will include the AVX512 state too when support for it is added
12228 * due to the zero-extending feature of VEX instructions. */
12229#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12230/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12231#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12232/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12233#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12234
12235/**
12236 * Calls a MMX assembly implementation taking two visible arguments.
12237 *
12238 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12239 * @param a0 The first extra argument.
12240 * @param a1 The second extra argument.
12241 */
12242#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12243 do { \
12244 IEM_MC_PREPARE_FPU_USAGE(); \
12245 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12246 } while (0)
12247
12248/**
12249 * Calls a MMX assembly implementation taking three visible arguments.
12250 *
12251 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12252 * @param a0 The first extra argument.
12253 * @param a1 The second extra argument.
12254 * @param a2 The third extra argument.
12255 */
12256#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12257 do { \
12258 IEM_MC_PREPARE_FPU_USAGE(); \
12259 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12260 } while (0)
12261
12262
12263/**
12264 * Calls a SSE assembly implementation taking two visible arguments.
12265 *
12266 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12267 * @param a0 The first extra argument.
12268 * @param a1 The second extra argument.
12269 */
12270#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12271 do { \
12272 IEM_MC_PREPARE_SSE_USAGE(); \
12273 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12274 } while (0)
12275
12276/**
12277 * Calls a SSE assembly implementation taking three visible arguments.
12278 *
12279 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12280 * @param a0 The first extra argument.
12281 * @param a1 The second extra argument.
12282 * @param a2 The third extra argument.
12283 */
12284#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12285 do { \
12286 IEM_MC_PREPARE_SSE_USAGE(); \
12287 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12288 } while (0)
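
/* Illustrative sketch (assumed shape; CPU feature and CR0/CR4 checks
 * omitted): the register form of an SSE byte-shuffle style instruction
 * references two XMM registers and hands them to the worker, the SSE state
 * preparation being part of the call macro above:
 * @code
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pshufb_u128, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode */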
12289
12290
12291/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12292 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12293#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12294 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12295
12296/**
12297 * Calls a AVX assembly implementation taking two visible arguments.
12298 *
12299 * There is one implicit zero'th argument, a pointer to the extended state.
12300 *
12301 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12302 * @param a1 The first extra argument.
12303 * @param a2 The second extra argument.
12304 */
12305#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12306 do { \
12307 IEM_MC_PREPARE_AVX_USAGE(); \
12308 a_pfnAImpl(pXState, (a1), (a2)); \
12309 } while (0)
12310
12311/**
12312 * Calls a AVX assembly implementation taking three visible arguments.
12313 *
12314 * There is one implicit zero'th argument, a pointer to the extended state.
12315 *
12316 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12317 * @param a1 The first extra argument.
12318 * @param a2 The second extra argument.
12319 * @param a3 The third extra argument.
12320 */
12321#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12322 do { \
12323 IEM_MC_PREPARE_AVX_USAGE(); \
12324 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12325 } while (0)
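
/* Illustrative sketch (assumed shape; iemAImpl_vexample_u256 is a made-up
 * worker name): the AVX call macros take the extended state implicitly, with
 * IEM_MC_IMPLICIT_AVX_AIMPL_ARGS supplying pXState as the hidden argument 0
 * and iDstReg/iSrcReg standing for the decoded register indexes:
 * @code
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG_CONST(uint8_t, iYRegDst, iDstReg, 1);
 *      IEM_MC_ARG_CONST(uint8_t, iYRegSrc, iSrcReg, 2);
 *      IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vexample_u256, iYRegDst, iYRegSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode */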
12326
12327/** @note Not for IOPL or IF testing. */
12328#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12329/** @note Not for IOPL or IF testing. */
12330#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12331/** @note Not for IOPL or IF testing. */
12332#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12333/** @note Not for IOPL or IF testing. */
12334#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12335/** @note Not for IOPL or IF testing. */
12336#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12337 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12338 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12339/** @note Not for IOPL or IF testing. */
12340#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12341 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12342 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12343/** @note Not for IOPL or IF testing. */
12344#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12345 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12346 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12347 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12348/** @note Not for IOPL or IF testing. */
12349#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12350 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12351 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12352 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12353#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12354#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12355#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12356/** @note Not for IOPL or IF testing. */
12357#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12358 if ( pVCpu->cpum.GstCtx.cx != 0 \
12359 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12360/** @note Not for IOPL or IF testing. */
12361#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12362 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12363 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12364/** @note Not for IOPL or IF testing. */
12365#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12366 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12367 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12368/** @note Not for IOPL or IF testing. */
12369#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12370 if ( pVCpu->cpum.GstCtx.cx != 0 \
12371 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12372/** @note Not for IOPL or IF testing. */
12373#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12374 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12375 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12376/** @note Not for IOPL or IF testing. */
12377#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12378 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12379 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12380#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12381#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12382
12383#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12384 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12385#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12386 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12387#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12388 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12389#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12390 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12391#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12392 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12393#define IEM_MC_IF_FCW_IM() \
12394 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12395
12396#define IEM_MC_ELSE() } else {
12397#define IEM_MC_ENDIF() } do {} while (0)
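
/* Illustrative sketch: the IEM_MC_IF_*() / IEM_MC_ELSE() / IEM_MC_ENDIF()
 * macros open and close braces themselves, so conditional microcode reads
 * like ordinary C.  A JZ rel8 style block (assuming the IEM_MC_REL_JMP_S8
 * helper defined earlier in this file, with i8Imm being the decoded
 * immediate) would look roughly like:
 * @code
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ENDIF();
 *      IEM_MC_END();
 * @endcode
 * The literal braces are for readability only; they simply nest inside the
 * ones provided by the macros. */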
12398
12399/** @} */
12400
12401
12402/** @name Opcode Debug Helpers.
12403 * @{
12404 */
12405#ifdef VBOX_WITH_STATISTICS
12406# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12407#else
12408# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12409#endif
12410
12411#ifdef DEBUG
12412# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12413 do { \
12414 IEMOP_INC_STATS(a_Stats); \
12415 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12416 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12417 } while (0)
12418
12419# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12420 do { \
12421 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12422 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12423 (void)RT_CONCAT(OP_,a_Upper); \
12424 (void)(a_fDisHints); \
12425 (void)(a_fIemHints); \
12426 } while (0)
12427
12428# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12429 do { \
12430 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12431 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12432 (void)RT_CONCAT(OP_,a_Upper); \
12433 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12434 (void)(a_fDisHints); \
12435 (void)(a_fIemHints); \
12436 } while (0)
12437
12438# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12439 do { \
12440 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12441 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12442 (void)RT_CONCAT(OP_,a_Upper); \
12443 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12444 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12445 (void)(a_fDisHints); \
12446 (void)(a_fIemHints); \
12447 } while (0)
12448
12449# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12450 do { \
12451 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12452 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12453 (void)RT_CONCAT(OP_,a_Upper); \
12454 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12455 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12456 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12457 (void)(a_fDisHints); \
12458 (void)(a_fIemHints); \
12459 } while (0)
12460
12461# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12462 do { \
12463 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12464 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12465 (void)RT_CONCAT(OP_,a_Upper); \
12466 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12467 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12468 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12469 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12470 (void)(a_fDisHints); \
12471 (void)(a_fIemHints); \
12472 } while (0)
12473
12474#else
12475# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12476
12477# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12478 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12479# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12480 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12481# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12482 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12483# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12484 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12485# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12486 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12487
12488#endif
12489
12490#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12491 IEMOP_MNEMONIC0EX(a_Lower, \
12492 #a_Lower, \
12493 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12494#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12495 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12496 #a_Lower " " #a_Op1, \
12497 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12498#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12499 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12500 #a_Lower " " #a_Op1 "," #a_Op2, \
12501 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12502#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12503 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12504 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12505 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12506#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12507 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12508 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12509 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
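
/* Illustrative sketch: a decoder function typically opens with one of these,
 * expanding to the statistics bump, the Log4 decode line and compile-time
 * checks that the referenced IEMOPFORM_/OP_/OP_PARM_ constants exist.  For a
 * two-operand instruction this might look like (form and hint values are
 * indicative only):
 * @code
 *      IEMOP_MNEMONIC2(RM, MOVZX, movzx, Gv, Eb, DISOPTYPE_HARMLESS, 0);
 * @endcode
 * which also derives the per-instruction statistics counter name
 * (movzx_Gv_Eb) from the lower-case mnemonic and operand tokens. */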
12510
12511/** @} */
12512
12513
12514/** @name Opcode Helpers.
12515 * @{
12516 */
12517
12518#ifdef IN_RING3
12519# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12520 do { \
12521 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12522 else \
12523 { \
12524 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12525 return IEMOP_RAISE_INVALID_OPCODE(); \
12526 } \
12527 } while (0)
12528#else
12529# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12530 do { \
12531 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12532 else return IEMOP_RAISE_INVALID_OPCODE(); \
12533 } while (0)
12534#endif
12535
12536/** The instruction requires a 186 or later. */
12537#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12538# define IEMOP_HLP_MIN_186() do { } while (0)
12539#else
12540# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12541#endif
12542
12543/** The instruction requires a 286 or later. */
12544#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12545# define IEMOP_HLP_MIN_286() do { } while (0)
12546#else
12547# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12548#endif
12549
12550/** The instruction requires a 386 or later. */
12551#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12552# define IEMOP_HLP_MIN_386() do { } while (0)
12553#else
12554# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12555#endif
12556
12557/** The instruction requires a 386 or later if the given expression is true. */
12558#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12559# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12560#else
12561# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12562#endif
12563
12564/** The instruction requires a 486 or later. */
12565#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12566# define IEMOP_HLP_MIN_486() do { } while (0)
12567#else
12568# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12569#endif
12570
12571/** The instruction requires a Pentium (586) or later. */
12572#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12573# define IEMOP_HLP_MIN_586() do { } while (0)
12574#else
12575# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12576#endif
12577
12578/** The instruction requires a PentiumPro (686) or later. */
12579#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12580# define IEMOP_HLP_MIN_686() do { } while (0)
12581#else
12582# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12583#endif
12584
12585
12586/** The instruction raises an \#UD in real and V8086 mode. */
12587#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12588 do \
12589 { \
12590 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12591 else return IEMOP_RAISE_INVALID_OPCODE(); \
12592 } while (0)
12593
12594#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12595/** The instruction raises an \#UD in real and V8086 mode, or when in long mode
12596 * without using a 64-bit code segment (applicable to all VMX instructions
12597 * except VMCALL).
12598 */
12599#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12600 do \
12601 { \
12602 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12603 && ( !IEM_IS_LONG_MODE(pVCpu) \
12604 || IEM_IS_64BIT_CODE(pVCpu))) \
12605 { /* likely */ } \
12606 else \
12607 { \
12608 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12609 { \
12610 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12611 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12612 return IEMOP_RAISE_INVALID_OPCODE(); \
12613 } \
12614 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12615 { \
12616 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12617 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12618 return IEMOP_RAISE_INVALID_OPCODE(); \
12619 } \
12620 } \
12621 } while (0)
12622
12623/** The instruction can only be executed in VMX operation (VMX root mode and
12624 * non-root mode).
12625 *
12626 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12627 */
12628# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12629 do \
12630 { \
12631 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12632 else \
12633 { \
12634 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12635 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12636 return IEMOP_RAISE_INVALID_OPCODE(); \
12637 } \
12638 } while (0)
12639#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12640
12641/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12642 * 64-bit mode. */
12643#define IEMOP_HLP_NO_64BIT() \
12644 do \
12645 { \
12646 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12647 return IEMOP_RAISE_INVALID_OPCODE(); \
12648 } while (0)
12649
12650/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12651 * 64-bit mode. */
12652#define IEMOP_HLP_ONLY_64BIT() \
12653 do \
12654 { \
12655 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12656 return IEMOP_RAISE_INVALID_OPCODE(); \
12657 } while (0)
12658
12659/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12660#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12661 do \
12662 { \
12663 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12664 iemRecalEffOpSize64Default(pVCpu); \
12665 } while (0)
12666
12667/** The instruction has 64-bit operand size if 64-bit mode. */
12668#define IEMOP_HLP_64BIT_OP_SIZE() \
12669 do \
12670 { \
12671 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12672 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12673 } while (0)
12674
12675/** Only a REX prefix immediately preceding the first opcode byte takes
12676 * effect. This macro helps ensure this as well as logging bad guest code. */
12677#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12678 do \
12679 { \
12680 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12681 { \
12682 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12683 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12684 pVCpu->iem.s.uRexB = 0; \
12685 pVCpu->iem.s.uRexIndex = 0; \
12686 pVCpu->iem.s.uRexReg = 0; \
12687 iemRecalEffOpSize(pVCpu); \
12688 } \
12689 } while (0)
12690
12691/**
12692 * Done decoding.
12693 */
12694#define IEMOP_HLP_DONE_DECODING() \
12695 do \
12696 { \
12697 /*nothing for now, maybe later... */ \
12698 } while (0)
12699
12700/**
12701 * Done decoding, raise \#UD exception if lock prefix present.
12702 */
12703#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12704 do \
12705 { \
12706 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12707 { /* likely */ } \
12708 else \
12709 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12710 } while (0)
12711
12712
12713/**
12714 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12715 * repnz or size prefixes are present, or if in real or v8086 mode.
12716 */
12717#define IEMOP_HLP_DONE_VEX_DECODING() \
12718 do \
12719 { \
12720 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12721 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12722 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12723 { /* likely */ } \
12724 else \
12725 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12726 } while (0)
12727
12728/**
12729 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12730 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12731 */
12732#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12733 do \
12734 { \
12735 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12736 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12737 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12738 && pVCpu->iem.s.uVexLength == 0)) \
12739 { /* likely */ } \
12740 else \
12741 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12742 } while (0)
12743
12744
12745/**
12746 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12747 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12748 * register 0, or if in real or v8086 mode.
12749 */
12750#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12751 do \
12752 { \
12753 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12754 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12755 && !pVCpu->iem.s.uVex3rdReg \
12756 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12757 { /* likely */ } \
12758 else \
12759 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12760 } while (0)
12761
12762/**
12763 * Done decoding VEX, no V, L=0.
12764 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12765 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12766 */
12767#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12768 do \
12769 { \
12770 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12771 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12772 && pVCpu->iem.s.uVexLength == 0 \
12773 && pVCpu->iem.s.uVex3rdReg == 0 \
12774 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12775 { /* likely */ } \
12776 else \
12777 return IEMOP_RAISE_INVALID_OPCODE(); \
12778 } while (0)
12779
12780#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12781 do \
12782 { \
12783 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12784 { /* likely */ } \
12785 else \
12786 { \
12787 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12788 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12789 } \
12790 } while (0)
12791#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12792 do \
12793 { \
12794 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12795 { /* likely */ } \
12796 else \
12797 { \
12798 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12799 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12800 } \
12801 } while (0)
12802
12803/**
12804 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12805 * are present.
12806 */
12807#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12808 do \
12809 { \
12810 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12811 { /* likely */ } \
12812 else \
12813 return IEMOP_RAISE_INVALID_OPCODE(); \
12814 } while (0)
12815
12816/**
12817 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12818 * prefixes are present.
12819 */
12820#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12821 do \
12822 { \
12823 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12824 { /* likely */ } \
12825 else \
12826 return IEMOP_RAISE_INVALID_OPCODE(); \
12827 } while (0)
12828
12829
12830/**
12831 * Calculates the effective address of a ModR/M memory operand.
12832 *
12833 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12834 *
12835 * @return Strict VBox status code.
12836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12837 * @param bRm The ModRM byte.
12838 * @param cbImm The size of any immediate following the
12839 * effective address opcode bytes. Important for
12840 * RIP relative addressing.
12841 * @param pGCPtrEff Where to return the effective address.
12842 */
12843IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12844{
12845 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12846# define SET_SS_DEF() \
12847 do \
12848 { \
12849 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12850 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12851 } while (0)
12852
12853 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12854 {
12855/** @todo Check the effective address size crap! */
12856 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12857 {
12858 uint16_t u16EffAddr;
12859
12860 /* Handle the disp16 form with no registers first. */
12861 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12862 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12863 else
12864 {
12865 /* Get the displacement. */
12866 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12867 {
12868 case 0: u16EffAddr = 0; break;
12869 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12870 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12871 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12872 }
12873
12874 /* Add the base and index registers to the disp. */
12875 switch (bRm & X86_MODRM_RM_MASK)
12876 {
12877 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12878 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12879 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12880 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12881 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12882 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12883 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12884 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12885 }
12886 }
12887
12888 *pGCPtrEff = u16EffAddr;
12889 }
12890 else
12891 {
12892 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12893 uint32_t u32EffAddr;
12894
12895 /* Handle the disp32 form with no registers first. */
12896 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12897 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12898 else
12899 {
12900 /* Get the register (or SIB) value. */
12901 switch ((bRm & X86_MODRM_RM_MASK))
12902 {
12903 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12904 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12905 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12906 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12907 case 4: /* SIB */
12908 {
12909 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12910
12911 /* Get the index and scale it. */
12912 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12913 {
12914 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12915 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12916 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12917 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12918 case 4: u32EffAddr = 0; /*none */ break;
12919 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12920 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12921 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12922 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12923 }
12924 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12925
12926 /* add base */
12927 switch (bSib & X86_SIB_BASE_MASK)
12928 {
12929 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12930 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12931 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12932 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12933 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12934 case 5:
12935 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12936 {
12937 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12938 SET_SS_DEF();
12939 }
12940 else
12941 {
12942 uint32_t u32Disp;
12943 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12944 u32EffAddr += u32Disp;
12945 }
12946 break;
12947 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12948 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12949 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12950 }
12951 break;
12952 }
12953 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12954 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12955 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12956 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12957 }
12958
12959 /* Get and add the displacement. */
12960 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12961 {
12962 case 0:
12963 break;
12964 case 1:
12965 {
12966 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12967 u32EffAddr += i8Disp;
12968 break;
12969 }
12970 case 2:
12971 {
12972 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12973 u32EffAddr += u32Disp;
12974 break;
12975 }
12976 default:
12977 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12978 }
12979
12980 }
12981 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12982 *pGCPtrEff = u32EffAddr;
12983 else
12984 {
12985 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12986 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12987 }
12988 }
12989 }
12990 else
12991 {
12992 uint64_t u64EffAddr;
12993
12994 /* Handle the rip+disp32 form with no registers first. */
12995 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12996 {
12997 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12998 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12999 }
13000 else
13001 {
13002 /* Get the register (or SIB) value. */
13003 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13004 {
13005 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13006 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13007 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13008 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13009 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13010 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13011 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13012 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13013 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13014 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13015 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13016 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13017 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13018 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13019 /* SIB */
13020 case 4:
13021 case 12:
13022 {
13023 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13024
13025 /* Get the index and scale it. */
13026 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13027 {
13028 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13029 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13030 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13031 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13032 case 4: u64EffAddr = 0; /*none */ break;
13033 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13034 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13035 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13036 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13037 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13038 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13039 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13040 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13041 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13042 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13043 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13044 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13045 }
13046 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13047
13048 /* add base */
13049 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13050 {
13051 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13052 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13053 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13054 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13055 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13056 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13057 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13058 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13059 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13060 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13061 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13062 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13063 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13064 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13065 /* complicated encodings */
13066 case 5:
13067 case 13:
13068 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13069 {
13070 if (!pVCpu->iem.s.uRexB)
13071 {
13072 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13073 SET_SS_DEF();
13074 }
13075 else
13076 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13077 }
13078 else
13079 {
13080 uint32_t u32Disp;
13081 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13082 u64EffAddr += (int32_t)u32Disp;
13083 }
13084 break;
13085 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13086 }
13087 break;
13088 }
13089 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13090 }
13091
13092 /* Get and add the displacement. */
13093 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13094 {
13095 case 0:
13096 break;
13097 case 1:
13098 {
13099 int8_t i8Disp;
13100 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13101 u64EffAddr += i8Disp;
13102 break;
13103 }
13104 case 2:
13105 {
13106 uint32_t u32Disp;
13107 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13108 u64EffAddr += (int32_t)u32Disp;
13109 break;
13110 }
13111 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13112 }
13113
13114 }
13115
13116 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13117 *pGCPtrEff = u64EffAddr;
13118 else
13119 {
13120 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13121 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13122 }
13123 }
13124
13125 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13126 return VINF_SUCCESS;
13127}
13128
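/*
 * Illustrative note (not part of the original sources): a worked example of
 * the 64-bit SIB decoding performed by the helper above, using hypothetical
 * register values and no REX prefix.  For ModRM=0x44 (mod=1, rm=4) followed
 * by SIB=0x98 the fields decode to scale=2 (factor 4), index=3 (RBX) and
 * base=0 (RAX), so with RAX=0x1000, RBX=0x20 and disp8=0x10 the code above
 * computes:
 *
 *     u64EffAddr  = rbx;           // index register          -> 0x20
 *     u64EffAddr <<= 2;            // apply scale factor 4    -> 0x80
 *     u64EffAddr += rax;           // add base register       -> 0x1080
 *     u64EffAddr += (int8_t)0x10;  // add disp8               -> 0x1090
 */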
13129
13130/**
13131 * Calculates the effective address of a ModR/M memory operand.
13132 *
13133 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13134 *
13135 * @return Strict VBox status code.
13136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13137 * @param bRm The ModRM byte.
13138 * @param cbImm The size of any immediate following the
13139 * effective address opcode bytes. Important for
13140 * RIP relative addressing.
13141 * @param pGCPtrEff Where to return the effective address.
13142 * @param   offRsp              The displacement added to RSP/ESP when it is used as the base register.
13143 */
13144IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13145{
13146    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13147# define SET_SS_DEF() \
13148 do \
13149 { \
13150 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13151 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13152 } while (0)
13153
13154 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13155 {
13156/** @todo Check the effective address size crap! */
13157 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13158 {
13159 uint16_t u16EffAddr;
13160
13161 /* Handle the disp16 form with no registers first. */
13162 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13163 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13164 else
13165 {
13166                /* Get the displacement. */
13167 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13168 {
13169 case 0: u16EffAddr = 0; break;
13170 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13171 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13172 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13173 }
13174
13175 /* Add the base and index registers to the disp. */
13176 switch (bRm & X86_MODRM_RM_MASK)
13177 {
13178 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13179 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13180 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13181 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13182 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13183 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13184 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13185 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13186 }
13187 }
13188
13189 *pGCPtrEff = u16EffAddr;
13190 }
13191 else
13192 {
13193 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13194 uint32_t u32EffAddr;
13195
13196 /* Handle the disp32 form with no registers first. */
13197 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13198 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13199 else
13200 {
13201 /* Get the register (or SIB) value. */
13202 switch ((bRm & X86_MODRM_RM_MASK))
13203 {
13204 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13205 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13206 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13207 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13208 case 4: /* SIB */
13209 {
13210 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13211
13212 /* Get the index and scale it. */
13213 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13214 {
13215 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13216 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13217 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13218 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13219 case 4: u32EffAddr = 0; /*none */ break;
13220 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13221 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13222 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13223 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13224 }
13225 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13226
13227 /* add base */
13228 switch (bSib & X86_SIB_BASE_MASK)
13229 {
13230 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13231 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13232 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13233 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13234 case 4:
13235 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13236 SET_SS_DEF();
13237 break;
13238 case 5:
13239 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13240 {
13241 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13242 SET_SS_DEF();
13243 }
13244 else
13245 {
13246 uint32_t u32Disp;
13247 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13248 u32EffAddr += u32Disp;
13249 }
13250 break;
13251 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13252 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13253 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13254 }
13255 break;
13256 }
13257 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13258 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13259 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13260 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13261 }
13262
13263 /* Get and add the displacement. */
13264 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13265 {
13266 case 0:
13267 break;
13268 case 1:
13269 {
13270 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13271 u32EffAddr += i8Disp;
13272 break;
13273 }
13274 case 2:
13275 {
13276 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13277 u32EffAddr += u32Disp;
13278 break;
13279 }
13280 default:
13281 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13282 }
13283
13284 }
13285 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13286 *pGCPtrEff = u32EffAddr;
13287 else
13288 {
13289 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13290 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13291 }
13292 }
13293 }
13294 else
13295 {
13296 uint64_t u64EffAddr;
13297
13298 /* Handle the rip+disp32 form with no registers first. */
13299 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13300 {
13301 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13302 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13303 }
13304 else
13305 {
13306 /* Get the register (or SIB) value. */
13307 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13308 {
13309 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13310 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13311 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13312 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13313 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13314 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13315 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13316 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13317 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13318 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13319 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13320 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13321 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13322 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13323 /* SIB */
13324 case 4:
13325 case 12:
13326 {
13327 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13328
13329 /* Get the index and scale it. */
13330 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13331 {
13332 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13333 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13334 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13335 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13336 case 4: u64EffAddr = 0; /*none */ break;
13337 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13338 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13339 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13340 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13341 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13342 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13343 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13344 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13345 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13346 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13347 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13348 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13349 }
13350 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13351
13352 /* add base */
13353 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13354 {
13355 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13356 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13357 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13358 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13359 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13360 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13361 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13362 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13363 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13364 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13365 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13366 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13367 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13368 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13369 /* complicated encodings */
13370 case 5:
13371 case 13:
13372 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13373 {
13374 if (!pVCpu->iem.s.uRexB)
13375 {
13376 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13377 SET_SS_DEF();
13378 }
13379 else
13380 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13381 }
13382 else
13383 {
13384 uint32_t u32Disp;
13385 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13386 u64EffAddr += (int32_t)u32Disp;
13387 }
13388 break;
13389 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13390 }
13391 break;
13392 }
13393 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13394 }
13395
13396 /* Get and add the displacement. */
13397 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13398 {
13399 case 0:
13400 break;
13401 case 1:
13402 {
13403 int8_t i8Disp;
13404 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13405 u64EffAddr += i8Disp;
13406 break;
13407 }
13408 case 2:
13409 {
13410 uint32_t u32Disp;
13411 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13412 u64EffAddr += (int32_t)u32Disp;
13413 break;
13414 }
13415 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13416 }
13417
13418 }
13419
13420 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13421 *pGCPtrEff = u64EffAddr;
13422 else
13423 {
13424 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13425 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13426 }
13427 }
13428
13429    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13430 return VINF_SUCCESS;
13431}
13432
13433
13434#ifdef IEM_WITH_SETJMP
13435/**
13436 * Calculates the effective address of a ModR/M memory operand.
13437 *
13438 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13439 *
13440 * May longjmp on internal error.
13441 *
13442 * @return The effective address.
13443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13444 * @param bRm The ModRM byte.
13445 * @param cbImm The size of any immediate following the
13446 * effective address opcode bytes. Important for
13447 * RIP relative addressing.
13448 */
13449IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
13450{
13451 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13452# define SET_SS_DEF() \
13453 do \
13454 { \
13455 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13456 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13457 } while (0)
13458
13459 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13460 {
13461/** @todo Check the effective address size crap! */
13462 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13463 {
13464 uint16_t u16EffAddr;
13465
13466 /* Handle the disp16 form with no registers first. */
13467 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13468 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13469 else
13470 {
13471                /* Get the displacement. */
13472 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13473 {
13474 case 0: u16EffAddr = 0; break;
13475 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13476 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13477 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13478 }
13479
13480 /* Add the base and index registers to the disp. */
13481 switch (bRm & X86_MODRM_RM_MASK)
13482 {
13483 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13484 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13485 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13486 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13487 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13488 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13489 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13490 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13491 }
13492 }
13493
13494 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13495 return u16EffAddr;
13496 }
13497
13498 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13499 uint32_t u32EffAddr;
13500
13501 /* Handle the disp32 form with no registers first. */
13502 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13503 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13504 else
13505 {
13506 /* Get the register (or SIB) value. */
13507 switch ((bRm & X86_MODRM_RM_MASK))
13508 {
13509 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13510 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13511 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13512 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13513 case 4: /* SIB */
13514 {
13515 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13516
13517 /* Get the index and scale it. */
13518 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13519 {
13520 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13521 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13522 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13523 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13524 case 4: u32EffAddr = 0; /*none */ break;
13525 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13526 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13527 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13528 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13529 }
13530 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13531
13532 /* add base */
13533 switch (bSib & X86_SIB_BASE_MASK)
13534 {
13535 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13536 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13537 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13538 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13539 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13540 case 5:
13541 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13542 {
13543 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13544 SET_SS_DEF();
13545 }
13546 else
13547 {
13548 uint32_t u32Disp;
13549 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13550 u32EffAddr += u32Disp;
13551 }
13552 break;
13553 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13554 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13555 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13556 }
13557 break;
13558 }
13559 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13560 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13561 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13562 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13563 }
13564
13565 /* Get and add the displacement. */
13566 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13567 {
13568 case 0:
13569 break;
13570 case 1:
13571 {
13572 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13573 u32EffAddr += i8Disp;
13574 break;
13575 }
13576 case 2:
13577 {
13578 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13579 u32EffAddr += u32Disp;
13580 break;
13581 }
13582 default:
13583 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13584 }
13585 }
13586
13587 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13588 {
13589 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13590 return u32EffAddr;
13591 }
13592 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13593 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13594 return u32EffAddr & UINT16_MAX;
13595 }
13596
13597 uint64_t u64EffAddr;
13598
13599 /* Handle the rip+disp32 form with no registers first. */
13600 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13601 {
13602 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13603 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13604 }
13605 else
13606 {
13607 /* Get the register (or SIB) value. */
13608 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13609 {
13610 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13611 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13612 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13613 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13614 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13615 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13616 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13617 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13618 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13619 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13620 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13621 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13622 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13623 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13624 /* SIB */
13625 case 4:
13626 case 12:
13627 {
13628 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13629
13630 /* Get the index and scale it. */
13631 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13632 {
13633 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13634 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13635 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13636 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13637 case 4: u64EffAddr = 0; /*none */ break;
13638 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13639 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13640 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13641 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13642 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13643 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13644 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13645 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13646 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13647 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13648 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13649 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13650 }
13651 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13652
13653 /* add base */
13654 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13655 {
13656 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13657 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13658 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13659 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13660 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13661 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13662 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13663 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13664 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13665 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13666 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13667 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13668 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13669 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13670 /* complicated encodings */
13671 case 5:
13672 case 13:
13673 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13674 {
13675 if (!pVCpu->iem.s.uRexB)
13676 {
13677 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13678 SET_SS_DEF();
13679 }
13680 else
13681 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13682 }
13683 else
13684 {
13685 uint32_t u32Disp;
13686 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13687 u64EffAddr += (int32_t)u32Disp;
13688 }
13689 break;
13690 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13691 }
13692 break;
13693 }
13694 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13695 }
13696
13697 /* Get and add the displacement. */
13698 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13699 {
13700 case 0:
13701 break;
13702 case 1:
13703 {
13704 int8_t i8Disp;
13705 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13706 u64EffAddr += i8Disp;
13707 break;
13708 }
13709 case 2:
13710 {
13711 uint32_t u32Disp;
13712 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13713 u64EffAddr += (int32_t)u32Disp;
13714 break;
13715 }
13716 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13717 }
13718
13719 }
13720
13721 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13722 {
13723 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13724 return u64EffAddr;
13725 }
13726 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13727 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13728 return u64EffAddr & UINT32_MAX;
13729}
13730#endif /* IEM_WITH_SETJMP */
13731
13732/** @} */
13733
13734
13735
13736/*
13737 * Include the instructions
13738 */
13739#include "IEMAllInstructions.cpp.h"
13740
13741
13742
13743#ifdef LOG_ENABLED
13744/**
13745 * Logs the current instruction.
13746 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13747 * @param fSameCtx Set if we have the same context information as the VMM,
13748 * clear if we may have already executed an instruction in
13749 * our debug context. When clear, we assume IEMCPU holds
13750 * valid CPU mode info.
13751 *
13752 * The @a fSameCtx parameter is now misleading and obsolete.
13753 * @param pszFunction The IEM function doing the execution.
13754 */
13755IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
13756{
13757# ifdef IN_RING3
13758 if (LogIs2Enabled())
13759 {
13760 char szInstr[256];
13761 uint32_t cbInstr = 0;
13762 if (fSameCtx)
13763 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13764 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13765 szInstr, sizeof(szInstr), &cbInstr);
13766 else
13767 {
13768 uint32_t fFlags = 0;
13769 switch (pVCpu->iem.s.enmCpuMode)
13770 {
13771 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13772 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13773 case IEMMODE_16BIT:
13774 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13775 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13776 else
13777 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13778 break;
13779 }
13780 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13781 szInstr, sizeof(szInstr), &cbInstr);
13782 }
13783
13784 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13785 Log2(("**** %s\n"
13786 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13787 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13788 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13789 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13790 " %s\n"
13791 , pszFunction,
13792 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13793 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13794 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13795 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13796 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13797 szInstr));
13798
13799 if (LogIs3Enabled())
13800 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13801 }
13802 else
13803# endif
13804 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13805 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13806 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13807}
13808#endif /* LOG_ENABLED */
13809
13810
13811#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13812/**
13813 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
13814 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
13815 *
13816 * @returns Modified rcStrict.
13817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13818 * @param rcStrict The instruction execution status.
13819 */
13820static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13821{
13822 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
13823 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
13824 {
13825 /* VMX preemption timer takes priority over NMI-window exits. */
13826 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
13827 {
13828 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
13829 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
13830 }
13831 /*
13832 * Check remaining intercepts.
13833 *
13834 * NMI-window and Interrupt-window VM-exits.
13835 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
13836 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
13837 *
13838 * See Intel spec. 26.7.6 "NMI-Window Exiting".
13839 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
13840 */
13841 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
13842 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13843 && !TRPMHasTrap(pVCpu))
13844 {
13845 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
13846 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
13847 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
13848 {
13849 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
13850 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
13851 }
13852 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
13853 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
13854 {
13855 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
13856 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
13857 }
13858 }
13859 }
13860 /* TPR-below threshold/APIC write has the highest priority. */
13861 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
13862 {
13863 rcStrict = iemVmxApicWriteEmulation(pVCpu);
13864 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13865 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
13866 }
13867 /* MTF takes priority over VMX-preemption timer. */
13868 else
13869 {
13870 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
13871 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13872 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
13873 }
13874 return rcStrict;
13875}
13876#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13877
13878
13879/**
13880 * Makes status code adjustments (pass up from I/O and access handler)
13881 * as well as maintaining statistics.
13882 *
13883 * @returns Strict VBox status code to pass up.
13884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13885 * @param rcStrict The status from executing an instruction.
13886 */
13887DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13888{
13889 if (rcStrict != VINF_SUCCESS)
13890 {
13891 if (RT_SUCCESS(rcStrict))
13892 {
13893 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13894 || rcStrict == VINF_IOM_R3_IOPORT_READ
13895 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13896 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13897 || rcStrict == VINF_IOM_R3_MMIO_READ
13898 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13899 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13900 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13901 || rcStrict == VINF_CPUM_R3_MSR_READ
13902 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13903 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13904 || rcStrict == VINF_EM_RAW_TO_R3
13905 || rcStrict == VINF_EM_TRIPLE_FAULT
13906 || rcStrict == VINF_GIM_R3_HYPERCALL
13907 /* raw-mode / virt handlers only: */
13908 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13909 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13910 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13911 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13912 || rcStrict == VINF_SELM_SYNC_GDT
13913 || rcStrict == VINF_CSAM_PENDING_ACTION
13914 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13915 /* nested hw.virt codes: */
13916 || rcStrict == VINF_VMX_VMEXIT
13917 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13918 || rcStrict == VINF_SVM_VMEXIT
13919 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13920/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13921 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13922#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13923 if ( rcStrict == VINF_VMX_VMEXIT
13924 && rcPassUp == VINF_SUCCESS)
13925 rcStrict = VINF_SUCCESS;
13926 else
13927#endif
13928#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13929 if ( rcStrict == VINF_SVM_VMEXIT
13930 && rcPassUp == VINF_SUCCESS)
13931 rcStrict = VINF_SUCCESS;
13932 else
13933#endif
13934 if (rcPassUp == VINF_SUCCESS)
13935 pVCpu->iem.s.cRetInfStatuses++;
13936 else if ( rcPassUp < VINF_EM_FIRST
13937 || rcPassUp > VINF_EM_LAST
13938 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13939 {
13940 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13941 pVCpu->iem.s.cRetPassUpStatus++;
13942 rcStrict = rcPassUp;
13943 }
13944 else
13945 {
13946 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13947 pVCpu->iem.s.cRetInfStatuses++;
13948 }
13949 }
13950 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13951 pVCpu->iem.s.cRetAspectNotImplemented++;
13952 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13953 pVCpu->iem.s.cRetInstrNotImplemented++;
13954 else
13955 pVCpu->iem.s.cRetErrStatuses++;
13956 }
13957 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13958 {
13959 pVCpu->iem.s.cRetPassUpStatus++;
13960 rcStrict = pVCpu->iem.s.rcPassUp;
13961 }
13962
13963 return rcStrict;
13964}
13965
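/*
 * Illustrative note (not part of the original sources): a hypothetical example
 * of the pass-up handling above.  If an access handler stashed
 * VINF_IOM_R3_MMIO_WRITE in pVCpu->iem.s.rcPassUp while the instruction itself
 * completed with VINF_SUCCESS, the stashed status is returned so the caller
 * can finish the access in ring-3:
 *
 *     pVCpu->iem.s.rcPassUp = VINF_IOM_R3_MMIO_WRITE;
 *     VBOXSTRICTRC rcStrict = iemExecStatusCodeFiddling(pVCpu, VINF_SUCCESS);
 *     // rcStrict is now VINF_IOM_R3_MMIO_WRITE and cRetPassUpStatus was bumped.
 */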
13966
13967/**
13968 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13969 * IEMExecOneWithPrefetchedByPC.
13970 *
13971 * Similar code is found in IEMExecLots.
13972 *
13973 * @return Strict VBox status code.
13974 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13975 * @param fExecuteInhibit If set, execute the instruction following CLI,
13976 * POP SS and MOV SS,GR.
13977 * @param pszFunction The calling function name.
13978 */
13979DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
13980{
13981 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13982 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13983 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13984 RT_NOREF_PV(pszFunction);
13985
13986#ifdef IEM_WITH_SETJMP
13987 VBOXSTRICTRC rcStrict;
13988 jmp_buf JmpBuf;
13989 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13990 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13991 if ((rcStrict = setjmp(JmpBuf)) == 0)
13992 {
13993 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13994 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13995 }
13996 else
13997 pVCpu->iem.s.cLongJumps++;
13998 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13999#else
14000 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14001 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14002#endif
14003 if (rcStrict == VINF_SUCCESS)
14004 pVCpu->iem.s.cInstructions++;
14005 if (pVCpu->iem.s.cActiveMappings > 0)
14006 {
14007 Assert(rcStrict != VINF_SUCCESS);
14008 iemMemRollback(pVCpu);
14009 }
14010 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14011 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14012 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14013
14014//#ifdef DEBUG
14015// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14016//#endif
14017
14018#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14019 /*
14020 * Perform any VMX nested-guest instruction boundary actions.
14021 *
14022 * If any of these causes a VM-exit, we must skip executing the next
14023 * instruction (would run into stale page tables). A VM-exit makes sure
14024     * there is no interrupt-inhibition, so that should ensure we don't go
14025     * on and try to execute the next instruction. Clearing fExecuteInhibit is
14026 * problematic because of the setjmp/longjmp clobbering above.
14027 */
14028 if ( rcStrict == VINF_SUCCESS
14029 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
14030 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
14031 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
14032#endif
14033
14034 /* Execute the next instruction as well if a cli, pop ss or
14035 mov ss, Gr has just completed successfully. */
14036 if ( fExecuteInhibit
14037 && rcStrict == VINF_SUCCESS
14038 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14039 && EMIsInhibitInterruptsActive(pVCpu))
14040 {
14041 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14042 if (rcStrict == VINF_SUCCESS)
14043 {
14044#ifdef LOG_ENABLED
14045 iemLogCurInstr(pVCpu, false, pszFunction);
14046#endif
14047#ifdef IEM_WITH_SETJMP
14048 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14049 if ((rcStrict = setjmp(JmpBuf)) == 0)
14050 {
14051 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14052 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14053 }
14054 else
14055 pVCpu->iem.s.cLongJumps++;
14056 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14057#else
14058 IEM_OPCODE_GET_NEXT_U8(&b);
14059 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14060#endif
14061 if (rcStrict == VINF_SUCCESS)
14062 pVCpu->iem.s.cInstructions++;
14063 if (pVCpu->iem.s.cActiveMappings > 0)
14064 {
14065 Assert(rcStrict != VINF_SUCCESS);
14066 iemMemRollback(pVCpu);
14067 }
14068 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14069 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14070 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14071 }
14072 else if (pVCpu->iem.s.cActiveMappings > 0)
14073 iemMemRollback(pVCpu);
14074 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
14075 }
14076
14077 /*
14078 * Return value fiddling, statistics and sanity assertions.
14079 */
14080 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14081
14082 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14083 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14084 return rcStrict;
14085}
14086
14087
14088/**
14089 * Execute one instruction.
14090 *
14091 * @return Strict VBox status code.
14092 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14093 */
14094VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14095{
14096#ifdef LOG_ENABLED
14097 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14098#endif
14099
14100 /*
14101 * Do the decoding and emulation.
14102 */
14103 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14104 if (rcStrict == VINF_SUCCESS)
14105 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14106 else if (pVCpu->iem.s.cActiveMappings > 0)
14107 iemMemRollback(pVCpu);
14108
14109 if (rcStrict != VINF_SUCCESS)
14110 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14111 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14112 return rcStrict;
14113}
14114
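/*
 * Illustrative note (not part of the original sources): a minimal sketch of how
 * a caller on the EMT might drive the one-instruction interface above.  The
 * loop bound and the way informational statuses are propagated are hypothetical
 * and do not mirror the real callers in EM.
 *
 *     for (unsigned i = 0; i < 16; i++)
 *     {
 *         VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;   // e.g. VINF_IOM_R3_IOPORT_READ -> defer to ring-3
 *     }
 */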
14115
14116VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14117{
14118 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14119
14120 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14121 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14122 if (rcStrict == VINF_SUCCESS)
14123 {
14124 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14125 if (pcbWritten)
14126 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14127 }
14128 else if (pVCpu->iem.s.cActiveMappings > 0)
14129 iemMemRollback(pVCpu);
14130
14131 return rcStrict;
14132}
14133
14134
14135VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14136 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14137{
14138 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14139
14140 VBOXSTRICTRC rcStrict;
14141 if ( cbOpcodeBytes
14142 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14143 {
14144 iemInitDecoder(pVCpu, false);
14145#ifdef IEM_WITH_CODE_TLB
14146 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14147 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14148 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14149 pVCpu->iem.s.offCurInstrStart = 0;
14150 pVCpu->iem.s.offInstrNextByte = 0;
14151#else
14152 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14153 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14154#endif
14155 rcStrict = VINF_SUCCESS;
14156 }
14157 else
14158 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14159 if (rcStrict == VINF_SUCCESS)
14160 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14161 else if (pVCpu->iem.s.cActiveMappings > 0)
14162 iemMemRollback(pVCpu);
14163
14164 return rcStrict;
14165}
14166
14167
14168VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14169{
14170 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14171
14172 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14173 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14174 if (rcStrict == VINF_SUCCESS)
14175 {
14176 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14177 if (pcbWritten)
14178 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14179 }
14180 else if (pVCpu->iem.s.cActiveMappings > 0)
14181 iemMemRollback(pVCpu);
14182
14183 return rcStrict;
14184}
14185
14186
14187VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14188 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14189{
14190 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14191
14192 VBOXSTRICTRC rcStrict;
14193 if ( cbOpcodeBytes
14194 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14195 {
14196 iemInitDecoder(pVCpu, true);
14197#ifdef IEM_WITH_CODE_TLB
14198 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14199 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14200 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14201 pVCpu->iem.s.offCurInstrStart = 0;
14202 pVCpu->iem.s.offInstrNextByte = 0;
14203#else
14204 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14205 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14206#endif
14207 rcStrict = VINF_SUCCESS;
14208 }
14209 else
14210 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14211 if (rcStrict == VINF_SUCCESS)
14212 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14213 else if (pVCpu->iem.s.cActiveMappings > 0)
14214 iemMemRollback(pVCpu);
14215
14216 return rcStrict;
14217}
14218
14219
14220/**
14221 * For debugging DISGetParamSize, may come in handy.
14222 *
14223 * @returns Strict VBox status code.
14224 * @param pVCpu The cross context virtual CPU structure of the
14225 * calling EMT.
14226 * @param pCtxCore The context core structure.
14227 * @param OpcodeBytesPC The PC of the opcode bytes.
14228 * @param   pvOpcodeBytes   Prefetched opcode bytes.
14229 * @param cbOpcodeBytes Number of prefetched bytes.
14230 * @param pcbWritten Where to return the number of bytes written.
14231 * Optional.
14232 */
14233VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14234 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14235 uint32_t *pcbWritten)
14236{
14237 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14238
14239 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14240 VBOXSTRICTRC rcStrict;
14241 if ( cbOpcodeBytes
14242 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14243 {
14244 iemInitDecoder(pVCpu, true);
14245#ifdef IEM_WITH_CODE_TLB
14246 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14247 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14248 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14249 pVCpu->iem.s.offCurInstrStart = 0;
14250 pVCpu->iem.s.offInstrNextByte = 0;
14251#else
14252 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14253 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14254#endif
14255 rcStrict = VINF_SUCCESS;
14256 }
14257 else
14258 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14259 if (rcStrict == VINF_SUCCESS)
14260 {
14261 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14262 if (pcbWritten)
14263 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14264 }
14265 else if (pVCpu->iem.s.cActiveMappings > 0)
14266 iemMemRollback(pVCpu);
14267
14268 return rcStrict;
14269}
14270
14271
14272VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14273{
14274 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14275 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14276
14277 /*
14278 * See if there is an interrupt pending in TRPM, inject it if we can.
14279 */
14280 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14281#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14282 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14283 if (fIntrEnabled)
14284 {
14285 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14286 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14287 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14288 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
14289 else
14290 {
14291 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14292 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14293 }
14294 }
14295#else
14296 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14297#endif
14298
14299 /** @todo What if we are injecting an exception and not an interrupt? Is that
14300 * possible here? For now we assert it is indeed only an interrupt. */
14301 if ( fIntrEnabled
14302 && TRPMHasTrap(pVCpu)
14303 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14304 {
14305 uint8_t u8TrapNo;
14306 TRPMEVENT enmType;
14307 uint32_t uErrCode;
14308 RTGCPTR uCr2;
14309 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
14310 AssertRC(rc2);
14311 Assert(enmType == TRPM_HARDWARE_INT);
14312 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14313 TRPMResetTrap(pVCpu);
14314#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14315 /* Injecting an event may cause a VM-exit. */
14316 if ( rcStrict != VINF_SUCCESS
14317 && rcStrict != VINF_IEM_RAISED_XCPT)
14318 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14319#else
14320 NOREF(rcStrict);
14321#endif
14322 }
14323
14324 /*
14325 * Initial decoder init w/ prefetch, then setup setjmp.
14326 */
14327 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14328 if (rcStrict == VINF_SUCCESS)
14329 {
14330#ifdef IEM_WITH_SETJMP
14331 jmp_buf JmpBuf;
14332 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14333 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14334 pVCpu->iem.s.cActiveMappings = 0;
14335 if ((rcStrict = setjmp(JmpBuf)) == 0)
14336#endif
14337 {
14338 /*
14339             * The run loop. We limit ourselves to the caller-specified instruction count.
14340 */
14341 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14342 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14343 for (;;)
14344 {
14345 /*
14346 * Log the state.
14347 */
14348#ifdef LOG_ENABLED
14349 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14350#endif
14351
14352 /*
14353 * Do the decoding and emulation.
14354 */
14355 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14356 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14357 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14358 {
14359 Assert(pVCpu->iem.s.cActiveMappings == 0);
14360 pVCpu->iem.s.cInstructions++;
14361 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14362 {
14363 uint64_t fCpu = pVCpu->fLocalForcedActions
14364 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14365 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14366 | VMCPU_FF_TLB_FLUSH
14367 | VMCPU_FF_INHIBIT_INTERRUPTS
14368 | VMCPU_FF_BLOCK_NMIS
14369 | VMCPU_FF_UNHALT ));
14370
14371 if (RT_LIKELY( ( !fCpu
14372 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14373 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14374 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14375 {
14376 if (cMaxInstructionsGccStupidity-- > 0)
14377 {
14378                             /* Poll timers every now and then according to the caller's specs. */
14379 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14380 || !TMTimerPollBool(pVM, pVCpu))
14381 {
14382 Assert(pVCpu->iem.s.cActiveMappings == 0);
14383 iemReInitDecoder(pVCpu);
14384 continue;
14385 }
14386 }
14387 }
14388 }
14389 Assert(pVCpu->iem.s.cActiveMappings == 0);
14390 }
14391 else if (pVCpu->iem.s.cActiveMappings > 0)
14392 iemMemRollback(pVCpu);
14393 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14394 break;
14395 }
14396 }
14397#ifdef IEM_WITH_SETJMP
14398 else
14399 {
14400 if (pVCpu->iem.s.cActiveMappings > 0)
14401 iemMemRollback(pVCpu);
14402# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14403 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14404# endif
14405 pVCpu->iem.s.cLongJumps++;
14406 }
14407 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14408#endif
14409
14410 /*
14411 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14412 */
14413 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14414 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14415 }
14416 else
14417 {
14418 if (pVCpu->iem.s.cActiveMappings > 0)
14419 iemMemRollback(pVCpu);
14420
14421#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14422 /*
14423 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14424     * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14425 */
14426 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14427#endif
14428 }
14429
14430 /*
14431 * Maybe re-enter raw-mode and log.
14432 */
14433 if (rcStrict != VINF_SUCCESS)
14434 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14435 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14436 if (pcInstructions)
14437 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14438 return rcStrict;
14439}
14440
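/*
 * Illustrative note (not part of the original sources): cPollRate is used as a
 * mask above, so it must be a power of two minus one (see the assertion at the
 * top of the function).  A hypothetical call executing at most 4096 instructions
 * and polling the timers roughly every 512 instructions could look like:
 *
 *     uint32_t cInstructions = 0;
 *     VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/,
 *                                         511 /*cPollRate*/, &cInstructions);
 */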
14441
14442/**
14443 * Interface used by EMExecuteExec, does exit statistics and limits.
14444 *
14445 * @returns Strict VBox status code.
14446 * @param pVCpu The cross context virtual CPU structure.
14447 * @param fWillExit To be defined.
14448 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14449 * @param cMaxInstructions Maximum number of instructions to execute.
14450 * @param cMaxInstructionsWithoutExits
14451 * The max number of instructions without exits.
14452 * @param pStats Where to return statistics.
14453 */
14454VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14455 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14456{
14457 NOREF(fWillExit); /** @todo define flexible exit crits */
14458
14459 /*
14460 * Initialize return stats.
14461 */
14462 pStats->cInstructions = 0;
14463 pStats->cExits = 0;
14464 pStats->cMaxExitDistance = 0;
14465 pStats->cReserved = 0;
14466
14467 /*
14468 * Initial decoder init w/ prefetch, then setup setjmp.
14469 */
14470 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14471 if (rcStrict == VINF_SUCCESS)
14472 {
14473#ifdef IEM_WITH_SETJMP
14474 jmp_buf JmpBuf;
14475 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14476 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14477 pVCpu->iem.s.cActiveMappings = 0;
14478 if ((rcStrict = setjmp(JmpBuf)) == 0)
14479#endif
14480 {
14481#ifdef IN_RING0
14482 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14483#endif
14484 uint32_t cInstructionSinceLastExit = 0;
14485
14486 /*
14487             * The run loop. We limit ourselves to the caller-specified instruction limits.
14488 */
14489 PVM pVM = pVCpu->CTX_SUFF(pVM);
14490 for (;;)
14491 {
14492 /*
14493 * Log the state.
14494 */
14495#ifdef LOG_ENABLED
14496 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14497#endif
14498
14499 /*
14500 * Do the decoding and emulation.
14501 */
14502 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14503
14504 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14505 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14506
14507 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14508 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14509 {
14510 pStats->cExits += 1;
14511 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14512 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14513 cInstructionSinceLastExit = 0;
14514 }
14515
14516 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14517 {
14518 Assert(pVCpu->iem.s.cActiveMappings == 0);
14519 pVCpu->iem.s.cInstructions++;
14520 pStats->cInstructions++;
14521 cInstructionSinceLastExit++;
14522 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14523 {
14524 uint64_t fCpu = pVCpu->fLocalForcedActions
14525 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14526 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14527 | VMCPU_FF_TLB_FLUSH
14528 | VMCPU_FF_INHIBIT_INTERRUPTS
14529 | VMCPU_FF_BLOCK_NMIS
14530 | VMCPU_FF_UNHALT ));
14531
14532 if (RT_LIKELY( ( ( !fCpu
14533 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14534 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14535 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14536 || pStats->cInstructions < cMinInstructions))
14537 {
14538 if (pStats->cInstructions < cMaxInstructions)
14539 {
14540 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14541 {
14542#ifdef IN_RING0
14543 if ( !fCheckPreemptionPending
14544 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14545#endif
14546 {
14547 Assert(pVCpu->iem.s.cActiveMappings == 0);
14548 iemReInitDecoder(pVCpu);
14549 continue;
14550 }
14551#ifdef IN_RING0
14552 rcStrict = VINF_EM_RAW_INTERRUPT;
14553 break;
14554#endif
14555 }
14556 }
14557 }
14558 Assert(!(fCpu & VMCPU_FF_IEM));
14559 }
14560 Assert(pVCpu->iem.s.cActiveMappings == 0);
14561 }
14562 else if (pVCpu->iem.s.cActiveMappings > 0)
14563 iemMemRollback(pVCpu);
14564 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14565 break;
14566 }
14567 }
14568#ifdef IEM_WITH_SETJMP
14569 else
14570 {
14571 if (pVCpu->iem.s.cActiveMappings > 0)
14572 iemMemRollback(pVCpu);
14573 pVCpu->iem.s.cLongJumps++;
14574 }
14575 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14576#endif
14577
14578 /*
14579 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14580 */
14581 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14582 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14583 }
14584 else
14585 {
14586 if (pVCpu->iem.s.cActiveMappings > 0)
14587 iemMemRollback(pVCpu);
14588
14589#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14590 /*
14591 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14592     * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14593 */
14594 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14595#endif
14596 }
14597
14598 /*
14599 * Maybe re-enter raw-mode and log.
14600 */
14601 if (rcStrict != VINF_SUCCESS)
14602 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14603 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14604 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14605 return rcStrict;
14606}
14607
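/*
 * Illustrative note (not part of the original sources): a hypothetical call of
 * the exit-limited executor above, assuming the stats structure behind
 * PIEMEXECFOREXITSTATS follows the usual naming (IEMEXECFOREXITSTATS).  The
 * limits below are made-up values, not recommendations:
 *
 *     IEMEXECFOREXITSTATS Stats;
 *     VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/,
 *                                             1 /*cMinInstructions*/,
 *                                             4096 /*cMaxInstructions*/,
 *                                             64 /*cMaxInstructionsWithoutExits*/,
 *                                             &Stats);
 */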
14608
14609/**
14610 * Injects a trap, fault, abort, software interrupt or external interrupt.
14611 *
14612 * The parameter list matches TRPMQueryTrapAll pretty closely.
14613 *
14614 * @returns Strict VBox status code.
14615 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14616 * @param u8TrapNo The trap number.
14617 * @param enmType What type is it (trap/fault/abort), software
14618 * interrupt or hardware interrupt.
14619 * @param uErrCode The error code if applicable.
14620 * @param uCr2 The CR2 value if applicable.
14621 * @param cbInstr The instruction length (only relevant for
14622 * software interrupts).
14623 */
14624VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14625 uint8_t cbInstr)
14626{
14627 iemInitDecoder(pVCpu, false);
14628#ifdef DBGFTRACE_ENABLED
14629 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14630 u8TrapNo, enmType, uErrCode, uCr2);
14631#endif
14632
14633 uint32_t fFlags;
14634 switch (enmType)
14635 {
14636 case TRPM_HARDWARE_INT:
14637 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14638 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14639 uErrCode = uCr2 = 0;
14640 break;
14641
14642 case TRPM_SOFTWARE_INT:
14643 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14644 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14645 uErrCode = uCr2 = 0;
14646 break;
14647
14648 case TRPM_TRAP:
14649 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14650 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14651 if (u8TrapNo == X86_XCPT_PF)
14652 fFlags |= IEM_XCPT_FLAGS_CR2;
14653 switch (u8TrapNo)
14654 {
14655 case X86_XCPT_DF:
14656 case X86_XCPT_TS:
14657 case X86_XCPT_NP:
14658 case X86_XCPT_SS:
14659 case X86_XCPT_PF:
14660 case X86_XCPT_AC:
14661 case X86_XCPT_GP:
14662 fFlags |= IEM_XCPT_FLAGS_ERR;
14663 break;
14664 }
14665 break;
14666
14667 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14668 }
14669
14670 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14671
14672 if (pVCpu->iem.s.cActiveMappings > 0)
14673 iemMemRollback(pVCpu);
14674
14675 return rcStrict;
14676}
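
/*
 * Usage sketch (illustrative only; the wrapper function and the way the fault
 * details reach it are hypothetical, not part of the IEM API): injecting a
 * guest #PF via IEMInjectTrap(), much like IEMInjectTrpmEvent() below does
 * for events queued in TRPM.
 */
#if 0 /* Illustrative sketch, not compiled. */
static VBOXSTRICTRC exampleInjectGuestPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    /* TRPM_TRAP makes IEMInjectTrap() treat this as a CPU exception; for #PF it
       also sets IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 internally, so the
       error code and CR2 value are consumed.  cbInstr only matters for
       software interrupts, so 0 is fine here. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif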
14677
14678
14679/**
14680 * Injects the active TRPM event.
14681 *
14682 * @returns Strict VBox status code.
14683 * @param pVCpu The cross context virtual CPU structure.
14684 */
14685VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
14686{
14687#ifndef IEM_IMPLEMENTS_TASKSWITCH
14688 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14689#else
14690 uint8_t u8TrapNo;
14691 TRPMEVENT enmType;
14692 uint32_t uErrCode;
14693 RTGCUINTPTR uCr2;
14694 uint8_t cbInstr;
14695 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
14696 if (RT_FAILURE(rc))
14697 return rc;
14698
14699 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
14700 * ICEBP \#DB injection as a special case. */
14701 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14702#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14703 if (rcStrict == VINF_SVM_VMEXIT)
14704 rcStrict = VINF_SUCCESS;
14705#endif
14706#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14707 if (rcStrict == VINF_VMX_VMEXIT)
14708 rcStrict = VINF_SUCCESS;
14709#endif
14710 /** @todo Are there any other codes that imply the event was successfully
14711 * delivered to the guest? See @bugref{6607}. */
14712 if ( rcStrict == VINF_SUCCESS
14713 || rcStrict == VINF_IEM_RAISED_XCPT)
14714 TRPMResetTrap(pVCpu);
14715
14716 return rcStrict;
14717#endif
14718}
14719
14720
14721VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14722{
14723 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14724 return VERR_NOT_IMPLEMENTED;
14725}
14726
14727
14728VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14729{
14730 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14731 return VERR_NOT_IMPLEMENTED;
14732}
14733
14734
14735#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14736/**
14737 * Executes an IRET instruction with the default operand size.
14738 *
14739 * This is for PATM.
14740 *
14741 * @returns VBox status code.
14742 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14743 * @param pCtxCore The register frame.
14744 */
14745VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
14746{
14747 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14748
14749 iemCtxCoreToCtx(pCtx, pCtxCore);
14750 iemInitDecoder(pVCpu);
14751 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14752 if (rcStrict == VINF_SUCCESS)
14753 iemCtxToCtxCore(pCtxCore, pCtx);
14754 else
14755 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14756 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14757 return rcStrict;
14758}
14759#endif
14760
14761
14762/**
14763 * Macro used by the IEMExec* methods to check the given instruction length.
14764 *
14765 * Will return on failure!
14766 *
14767 * @param a_cbInstr The given instruction length.
14768 * @param a_cbMin The minimum length.
14769 */
14770#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14771 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14772 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14773
14774
14775/**
14776 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14777 *
14778 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14779 *
14780 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14782 * @param rcStrict The status code to fiddle.
14783 */
14784DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14785{
14786 iemUninitExec(pVCpu);
14787 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14788}
14789
14790
14791/**
14792 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14793 *
14794 * This API ASSUMES that the caller has already verified that the guest code is
14795 * allowed to access the I/O port. (The I/O port is in the DX register in the
14796 * guest state.)
14797 *
14798 * @returns Strict VBox status code.
14799 * @param pVCpu The cross context virtual CPU structure.
14800 * @param cbValue The size of the I/O port access (1, 2, or 4).
14801 * @param enmAddrMode The addressing mode.
14802 * @param fRepPrefix Indicates whether a repeat prefix is used
14803 * (doesn't matter which for this instruction).
14804 * @param cbInstr The instruction length in bytes.
14805 * @param iEffSeg The effective segment register.
14806 * @param fIoChecked Whether the access to the I/O port has been
14807 * checked or not. It's typically checked in the
14808 * HM scenario.
14809 */
14810VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14811 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14812{
14813 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14814 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14815
14816 /*
14817 * State init.
14818 */
14819 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14820
14821 /*
14822 * Switch orgy for getting to the right handler.
14823 */
14824 VBOXSTRICTRC rcStrict;
14825 if (fRepPrefix)
14826 {
14827 switch (enmAddrMode)
14828 {
14829 case IEMMODE_16BIT:
14830 switch (cbValue)
14831 {
14832 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14833 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14834 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14835 default:
14836 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14837 }
14838 break;
14839
14840 case IEMMODE_32BIT:
14841 switch (cbValue)
14842 {
14843 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14844 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14845 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14846 default:
14847 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14848 }
14849 break;
14850
14851 case IEMMODE_64BIT:
14852 switch (cbValue)
14853 {
14854 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14855 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14856 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14857 default:
14858 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14859 }
14860 break;
14861
14862 default:
14863 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14864 }
14865 }
14866 else
14867 {
14868 switch (enmAddrMode)
14869 {
14870 case IEMMODE_16BIT:
14871 switch (cbValue)
14872 {
14873 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14874 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14875 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14876 default:
14877 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14878 }
14879 break;
14880
14881 case IEMMODE_32BIT:
14882 switch (cbValue)
14883 {
14884 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14885 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14886 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14887 default:
14888 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14889 }
14890 break;
14891
14892 case IEMMODE_64BIT:
14893 switch (cbValue)
14894 {
14895 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14896 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14897 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14898 default:
14899 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14900 }
14901 break;
14902
14903 default:
14904 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14905 }
14906 }
14907
14908 if (pVCpu->iem.s.cActiveMappings)
14909 iemMemRollback(pVCpu);
14910
14911 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14912}
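
/*
 * Usage sketch (illustrative only; the handler shell and how cbInstr is
 * obtained are hypothetical): handing a REP OUTSB in a 32-bit guest over to
 * IEM from an HM I/O exit handler, after the port access itself has already
 * been checked (hence fIoChecked=true).
 */
#if 0 /* Illustrative sketch, not compiled. */
static VBOXSTRICTRC exampleHandleRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif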
14913
14914
14915/**
14916 * Interface for HM and EM for executing string I/O IN (read) instructions.
14917 *
14918 * This API ASSUMES that the caller has already verified that the guest code is
14919 * allowed to access the I/O port. (The I/O port is in the DX register in the
14920 * guest state.)
14921 *
14922 * @returns Strict VBox status code.
14923 * @param pVCpu The cross context virtual CPU structure.
14924 * @param cbValue The size of the I/O port access (1, 2, or 4).
14925 * @param enmAddrMode The addressing mode.
14926 * @param fRepPrefix Indicates whether a repeat prefix is used
14927 * (doesn't matter which for this instruction).
14928 * @param cbInstr The instruction length in bytes.
14929 * @param fIoChecked Whether the access to the I/O port has been
14930 * checked or not. It's typically checked in the
14931 * HM scenario.
14932 */
14933VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14934 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14935{
14936 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14937
14938 /*
14939 * State init.
14940 */
14941 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14942
14943 /*
14944 * Switch orgy for getting to the right handler.
14945 */
14946 VBOXSTRICTRC rcStrict;
14947 if (fRepPrefix)
14948 {
14949 switch (enmAddrMode)
14950 {
14951 case IEMMODE_16BIT:
14952 switch (cbValue)
14953 {
14954 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14955 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14956 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14957 default:
14958 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14959 }
14960 break;
14961
14962 case IEMMODE_32BIT:
14963 switch (cbValue)
14964 {
14965 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14966 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14967 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14968 default:
14969 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14970 }
14971 break;
14972
14973 case IEMMODE_64BIT:
14974 switch (cbValue)
14975 {
14976 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14977 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14978 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14979 default:
14980 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14981 }
14982 break;
14983
14984 default:
14985 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14986 }
14987 }
14988 else
14989 {
14990 switch (enmAddrMode)
14991 {
14992 case IEMMODE_16BIT:
14993 switch (cbValue)
14994 {
14995 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14996 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14997 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14998 default:
14999 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15000 }
15001 break;
15002
15003 case IEMMODE_32BIT:
15004 switch (cbValue)
15005 {
15006 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15007 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15008 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15009 default:
15010 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15011 }
15012 break;
15013
15014 case IEMMODE_64BIT:
15015 switch (cbValue)
15016 {
15017 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15018 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15019 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15020 default:
15021 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15022 }
15023 break;
15024
15025 default:
15026 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15027 }
15028 }
15029
15030 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15031 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15032}
15033
15034
15035/**
15036 * Interface for rawmode to execute an OUT (write) instruction.
15037 *
15038 * @returns Strict VBox status code.
15039 * @param pVCpu The cross context virtual CPU structure.
15040 * @param cbInstr The instruction length in bytes.
15041 * @param u16Port The port to write to.
15042 * @param fImm Whether the port is specified using an immediate operand or
15043 * using the implicit DX register.
15044 * @param cbReg The register size.
15045 *
15046 * @remarks In ring-0 not all of the state needs to be synced in.
15047 */
15048VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15049{
15050 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15051 Assert(cbReg <= 4 && cbReg != 3);
15052
15053 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15054 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15055 Assert(!pVCpu->iem.s.cActiveMappings);
15056 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15057}
15058
15059
15060/**
15061 * Interface for rawmode to execute an IN (read) instruction.
15062 *
15063 * @returns Strict VBox status code.
15064 * @param pVCpu The cross context virtual CPU structure.
15065 * @param cbInstr The instruction length in bytes.
15066 * @param u16Port The port to read.
15067 * @param fImm Whether the port is specified using an immediate operand or
15068 * using the implicit DX.
15069 * @param cbReg The register size.
15070 */
15071VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15072{
15073 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15074 Assert(cbReg <= 4 && cbReg != 3);
15075
15076 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15077 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15078 Assert(!pVCpu->iem.s.cActiveMappings);
15079 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15080}
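
/*
 * Usage sketch (illustrative only; the wrapper is hypothetical): forwarding a
 * decoded one byte "in al, dx" (opcode 0xEC) to IEM.  A real exit handler
 * would pass the decoded port, width and instruction length instead of the
 * literals used here.
 */
#if 0 /* Illustrative sketch, not compiled. */
static VBOXSTRICTRC exampleForwardInAlDx(PVMCPUCC pVCpu, uint16_t u16Port)
{
    return IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm - port is in DX*/, 1 /*cbReg*/);
}
#endif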
15081
15082
15083/**
15084 * Interface for HM and EM to write to a CRx register.
15085 *
15086 * @returns Strict VBox status code.
15087 * @param pVCpu The cross context virtual CPU structure.
15088 * @param cbInstr The instruction length in bytes.
15089 * @param iCrReg The control register number (destination).
15090 * @param iGReg The general purpose register number (source).
15091 *
15092 * @remarks In ring-0 not all of the state needs to be synced in.
15093 */
15094VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15095{
15096 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15097 Assert(iCrReg < 16);
15098 Assert(iGReg < 16);
15099
15100 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15101 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15102 Assert(!pVCpu->iem.s.cActiveMappings);
15103 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15104}
15105
15106
15107/**
15108 * Interface for HM and EM to read from a CRx register.
15109 *
15110 * @returns Strict VBox status code.
15111 * @param pVCpu The cross context virtual CPU structure.
15112 * @param cbInstr The instruction length in bytes.
15113 * @param iGReg The general purpose register number (destination).
15114 * @param iCrReg The control register number (source).
15115 *
15116 * @remarks In ring-0 not all of the state needs to be synced in.
15117 */
15118VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15119{
15120 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15121 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15122 | CPUMCTX_EXTRN_APIC_TPR);
15123 Assert(iCrReg < 16);
15124 Assert(iGReg < 16);
15125
15126 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15127 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15128 Assert(!pVCpu->iem.s.cActiveMappings);
15129 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15130}
15131
15132
15133/**
15134 * Interface for HM and EM to clear the CR0[TS] bit.
15135 *
15136 * @returns Strict VBox status code.
15137 * @param pVCpu The cross context virtual CPU structure.
15138 * @param cbInstr The instruction length in bytes.
15139 *
15140 * @remarks In ring-0 not all of the state needs to be synced in.
15141 */
15142VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15143{
15144 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15145
15146 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15147 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15148 Assert(!pVCpu->iem.s.cActiveMappings);
15149 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15150}
15151
15152
15153/**
15154 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15155 *
15156 * @returns Strict VBox status code.
15157 * @param pVCpu The cross context virtual CPU structure.
15158 * @param cbInstr The instruction length in bytes.
15159 * @param uValue The value to load into CR0.
15160 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15161 * memory operand. Otherwise pass NIL_RTGCPTR.
15162 *
15163 * @remarks In ring-0 not all of the state needs to be synced in.
15164 */
15165VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15166{
15167 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15168
15169 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15170 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15171 Assert(!pVCpu->iem.s.cActiveMappings);
15172 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15173}
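
/*
 * Usage sketch (illustrative only; the wrapper is hypothetical): forwarding a
 * decoded "lmsw ax" (0F 01 F0, three bytes, register operand) to IEM.  Since
 * there is no memory operand, NIL_RTGCPTR is passed for GCPtrEffDst as the
 * parameter description above requires.
 */
#if 0 /* Illustrative sketch, not compiled. */
static VBOXSTRICTRC exampleForwardLmswAx(PVMCPUCC pVCpu, uint16_t uNewMsw)
{
    return IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/, uNewMsw, NIL_RTGCPTR /* no memory operand */);
}
#endif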
15174
15175
15176/**
15177 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15178 *
15179 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15180 *
15181 * @returns Strict VBox status code.
15182 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15183 * @param cbInstr The instruction length in bytes.
15184 * @remarks In ring-0 not all of the state needs to be synced in.
15185 * @thread EMT(pVCpu)
15186 */
15187VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15188{
15189 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15190
15191 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15192 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15193 Assert(!pVCpu->iem.s.cActiveMappings);
15194 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15195}
15196
15197
15198/**
15199 * Interface for HM and EM to emulate the WBINVD instruction.
15200 *
15201 * @returns Strict VBox status code.
15202 * @param pVCpu The cross context virtual CPU structure.
15203 * @param cbInstr The instruction length in bytes.
15204 *
15205 * @remarks In ring-0 not all of the state needs to be synced in.
15206 */
15207VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15208{
15209 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15210
15211 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15212 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15213 Assert(!pVCpu->iem.s.cActiveMappings);
15214 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15215}
15216
15217
15218/**
15219 * Interface for HM and EM to emulate the INVD instruction.
15220 *
15221 * @returns Strict VBox status code.
15222 * @param pVCpu The cross context virtual CPU structure.
15223 * @param cbInstr The instruction length in bytes.
15224 *
15225 * @remarks In ring-0 not all of the state needs to be synced in.
15226 */
15227VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15228{
15229 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15230
15231 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15232 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15233 Assert(!pVCpu->iem.s.cActiveMappings);
15234 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15235}
15236
15237
15238/**
15239 * Interface for HM and EM to emulate the INVLPG instruction.
15240 *
15241 * @returns Strict VBox status code.
15242 * @retval VINF_PGM_SYNC_CR3
15243 *
15244 * @param pVCpu The cross context virtual CPU structure.
15245 * @param cbInstr The instruction length in bytes.
15246 * @param GCPtrPage The effective address of the page to invalidate.
15247 *
15248 * @remarks In ring-0 not all of the state needs to be synced in.
15249 */
15250VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15251{
15252 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15253
15254 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15255 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15256 Assert(!pVCpu->iem.s.cActiveMappings);
15257 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15258}
15259
15260
15261/**
15262 * Interface for HM and EM to emulate the INVPCID instruction.
15263 *
15264 * @returns Strict VBox status code.
15265 * @retval VINF_PGM_SYNC_CR3
15266 *
15267 * @param pVCpu The cross context virtual CPU structure.
15268 * @param cbInstr The instruction length in bytes.
15269 * @param iEffSeg The effective segment register.
15270 * @param GCPtrDesc The effective address of the INVPCID descriptor.
15271 * @param uType The invalidation type.
15272 *
15273 * @remarks In ring-0 not all of the state needs to be synced in.
15274 */
15275VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
15276 uint64_t uType)
15277{
15278 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
15279
15280 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15281 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
15282 Assert(!pVCpu->iem.s.cActiveMappings);
15283 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15284}
15285
15286
15287/**
15288 * Interface for HM and EM to emulate the CPUID instruction.
15289 *
15290 * @returns Strict VBox status code.
15291 *
15292 * @param pVCpu The cross context virtual CPU structure.
15293 * @param cbInstr The instruction length in bytes.
15294 *
15295 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15296 */
15297VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15298{
15299 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15300 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15301
15302 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15303 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15304 Assert(!pVCpu->iem.s.cActiveMappings);
15305 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15306}
15307
15308
15309/**
15310 * Interface for HM and EM to emulate the RDPMC instruction.
15311 *
15312 * @returns Strict VBox status code.
15313 *
15314 * @param pVCpu The cross context virtual CPU structure.
15315 * @param cbInstr The instruction length in bytes.
15316 *
15317 * @remarks Not all of the state needs to be synced in.
15318 */
15319VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15320{
15321 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15322 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15323
15324 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15325 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15326 Assert(!pVCpu->iem.s.cActiveMappings);
15327 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15328}
15329
15330
15331/**
15332 * Interface for HM and EM to emulate the RDTSC instruction.
15333 *
15334 * @returns Strict VBox status code.
15335 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15336 *
15337 * @param pVCpu The cross context virtual CPU structure.
15338 * @param cbInstr The instruction length in bytes.
15339 *
15340 * @remarks Not all of the state needs to be synced in.
15341 */
15342VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15343{
15344 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15345 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15346
15347 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15348 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15349 Assert(!pVCpu->iem.s.cActiveMappings);
15350 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15351}
15352
15353
15354/**
15355 * Interface for HM and EM to emulate the RDTSCP instruction.
15356 *
15357 * @returns Strict VBox status code.
15358 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15359 *
15360 * @param pVCpu The cross context virtual CPU structure.
15361 * @param cbInstr The instruction length in bytes.
15362 *
15363 * @remarks Not all of the state needs to be synced in. Recommended
15364 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15365 */
15366VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15367{
15368 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15369 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15370
15371 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15372 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15373 Assert(!pVCpu->iem.s.cActiveMappings);
15374 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15375}
15376
15377
15378/**
15379 * Interface for HM and EM to emulate the RDMSR instruction.
15380 *
15381 * @returns Strict VBox status code.
15382 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15383 *
15384 * @param pVCpu The cross context virtual CPU structure.
15385 * @param cbInstr The instruction length in bytes.
15386 *
15387 * @remarks Not all of the state needs to be synced in. Requires RCX and
15388 * (currently) all MSRs.
15389 */
15390VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15391{
15392 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15393 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15394
15395 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15396 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15397 Assert(!pVCpu->iem.s.cActiveMappings);
15398 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15399}
15400
15401
15402/**
15403 * Interface for HM and EM to emulate the WRMSR instruction.
15404 *
15405 * @returns Strict VBox status code.
15406 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15407 *
15408 * @param pVCpu The cross context virtual CPU structure.
15409 * @param cbInstr The instruction length in bytes.
15410 *
15411 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15412 * and (currently) all MSRs.
15413 */
15414VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15415{
15416 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15417 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15418 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15419
15420 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15421 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15422 Assert(!pVCpu->iem.s.cActiveMappings);
15423 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15424}
15425
15426
15427/**
15428 * Interface for HM and EM to emulate the MONITOR instruction.
15429 *
15430 * @returns Strict VBox status code.
15431 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15432 *
15433 * @param pVCpu The cross context virtual CPU structure.
15434 * @param cbInstr The instruction length in bytes.
15435 *
15436 * @remarks Not all of the state needs to be synced in.
15437 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15438 * are used.
15439 */
15440VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
15441{
15442 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15443 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15444
15445 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15446 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15447 Assert(!pVCpu->iem.s.cActiveMappings);
15448 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15449}
15450
15451
15452/**
15453 * Interface for HM and EM to emulate the MWAIT instruction.
15454 *
15455 * @returns Strict VBox status code.
15456 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15457 *
15458 * @param pVCpu The cross context virtual CPU structure.
15459 * @param cbInstr The instruction length in bytes.
15460 *
15461 * @remarks Not all of the state needs to be synced in.
15462 */
15463VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
15464{
15465 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15466 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15467
15468 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15469 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15470 Assert(!pVCpu->iem.s.cActiveMappings);
15471 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15472}
15473
15474
15475/**
15476 * Interface for HM and EM to emulate the HLT instruction.
15477 *
15478 * @returns Strict VBox status code.
15479 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15480 *
15481 * @param pVCpu The cross context virtual CPU structure.
15482 * @param cbInstr The instruction length in bytes.
15483 *
15484 * @remarks Not all of the state needs to be synced in.
15485 */
15486VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
15487{
15488 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15489
15490 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15491 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15492 Assert(!pVCpu->iem.s.cActiveMappings);
15493 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15494}
15495
15496
15497/**
15498 * Checks if IEM is in the process of delivering an event (interrupt or
15499 * exception).
15500 *
15501 * @returns true if we're in the process of raising an interrupt or exception,
15502 * false otherwise.
15503 * @param pVCpu The cross context virtual CPU structure.
15504 * @param puVector Where to store the vector associated with the
15505 * currently delivered event, optional.
15506 * @param pfFlags Where to store the event delivery flags (see
15507 * IEM_XCPT_FLAGS_XXX), optional.
15508 * @param puErr Where to store the error code associated with the
15509 * event, optional.
15510 * @param puCr2 Where to store the CR2 associated with the event,
15511 * optional.
15512 * @remarks The caller should check the flags to determine if the error code and
15513 * CR2 are valid for the event.
15514 */
15515VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15516{
15517 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15518 if (fRaisingXcpt)
15519 {
15520 if (puVector)
15521 *puVector = pVCpu->iem.s.uCurXcpt;
15522 if (pfFlags)
15523 *pfFlags = pVCpu->iem.s.fCurXcpt;
15524 if (puErr)
15525 *puErr = pVCpu->iem.s.uCurXcptErr;
15526 if (puCr2)
15527 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15528 }
15529 return fRaisingXcpt;
15530}
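
/*
 * Usage sketch (illustrative only; the logging helper is hypothetical):
 * querying the event IEM is currently delivering and consulting the returned
 * flags before trusting the error code and CR2, as the remark above advises.
 */
#if 0 /* Illustrative sketch, not compiled. */
static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("Delivering vector %#x, error code %#x\n", uVector, uErr));
        else
            Log(("Delivering vector %#x (no error code)\n", uVector));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("Associated CR2=%#RX64\n", uCr2));
    }
}
#endif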
15531
15532#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15533
15534/**
15535 * Interface for HM and EM to emulate the CLGI instruction.
15536 *
15537 * @returns Strict VBox status code.
15538 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15539 * @param cbInstr The instruction length in bytes.
15540 * @thread EMT(pVCpu)
15541 */
15542VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15543{
15544 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15545
15546 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15547 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15548 Assert(!pVCpu->iem.s.cActiveMappings);
15549 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15550}
15551
15552
15553/**
15554 * Interface for HM and EM to emulate the STGI instruction.
15555 *
15556 * @returns Strict VBox status code.
15557 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15558 * @param cbInstr The instruction length in bytes.
15559 * @thread EMT(pVCpu)
15560 */
15561VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15562{
15563 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15564
15565 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15566 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15567 Assert(!pVCpu->iem.s.cActiveMappings);
15568 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15569}
15570
15571
15572/**
15573 * Interface for HM and EM to emulate the VMLOAD instruction.
15574 *
15575 * @returns Strict VBox status code.
15576 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15577 * @param cbInstr The instruction length in bytes.
15578 * @thread EMT(pVCpu)
15579 */
15580VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
15581{
15582 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15583
15584 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15585 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15586 Assert(!pVCpu->iem.s.cActiveMappings);
15587 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15588}
15589
15590
15591/**
15592 * Interface for HM and EM to emulate the VMSAVE instruction.
15593 *
15594 * @returns Strict VBox status code.
15595 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15596 * @param cbInstr The instruction length in bytes.
15597 * @thread EMT(pVCpu)
15598 */
15599VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
15600{
15601 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15602
15603 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15604 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15605 Assert(!pVCpu->iem.s.cActiveMappings);
15606 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15607}
15608
15609
15610/**
15611 * Interface for HM and EM to emulate the INVLPGA instruction.
15612 *
15613 * @returns Strict VBox status code.
15614 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15615 * @param cbInstr The instruction length in bytes.
15616 * @thread EMT(pVCpu)
15617 */
15618VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
15619{
15620 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15621
15622 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15623 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15624 Assert(!pVCpu->iem.s.cActiveMappings);
15625 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15626}
15627
15628
15629/**
15630 * Interface for HM and EM to emulate the VMRUN instruction.
15631 *
15632 * @returns Strict VBox status code.
15633 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15634 * @param cbInstr The instruction length in bytes.
15635 * @thread EMT(pVCpu)
15636 */
15637VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
15638{
15639 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15640 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15641
15642 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15643 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15644 Assert(!pVCpu->iem.s.cActiveMappings);
15645 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15646}
15647
15648
15649/**
15650 * Interface for HM and EM to emulate \#VMEXIT.
15651 *
15652 * @returns Strict VBox status code.
15653 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15654 * @param uExitCode The exit code.
15655 * @param uExitInfo1 The exit info. 1 field.
15656 * @param uExitInfo2 The exit info. 2 field.
15657 * @thread EMT(pVCpu)
15658 */
15659VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15660{
15661 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15662 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15663 if (pVCpu->iem.s.cActiveMappings)
15664 iemMemRollback(pVCpu);
15665 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15666}
15667
15668#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15669
15670#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15671
15672/**
15673 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15674 *
15675 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15676 * are performed. Bounds checks are done in strict builds only.
15677 *
15678 * @param pVmcs Pointer to the virtual VMCS.
15679 * @param u64VmcsField The VMCS field.
15680 * @param pu64Dst Where to store the VMCS value.
15681 *
15682 * @remarks May be called with interrupts disabled.
15683 * @todo This should probably be moved to CPUM someday.
15684 */
15685VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15686{
15687 AssertPtr(pVmcs);
15688 AssertPtr(pu64Dst);
15689 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15690}
15691
15692
15693/**
15694 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15695 *
15696 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15697 * are performed. Bounds checks are strict builds only.
15698 *
15699 * @param pVmcs Pointer to the virtual VMCS.
15700 * @param u64VmcsField The VMCS field.
15701 * @param u64Val The value to write.
15702 *
15703 * @remarks May be called with interrupts disabled.
15704 * @todo This should probably be moved to CPUM someday.
15705 */
15706VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15707{
15708 AssertPtr(pVmcs);
15709 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15710}
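
/*
 * Usage sketch (illustrative only; the helper and its parameters are
 * hypothetical): read-modify-write of a nested-guest VMCS field.  As stated
 * above, no VMREAD/VMWRITE checks are performed, so the caller must supply a
 * valid field encoding.
 */
#if 0 /* Illustrative sketch, not compiled. */
static void exampleAdjustVirtVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t fOrMask)
{
    uint64_t u64Val = 0;
    IEMReadVmxVmcsField(pVmcs, u64VmcsField, &u64Val);
    IEMWriteVmxVmcsField(pVmcs, u64VmcsField, u64Val | fOrMask);
}
#endif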
15711
15712
15713/**
15714 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15715 *
15716 * @returns Strict VBox status code.
15717 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15718 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15719 * the x2APIC device.
15720 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15721 *
15722 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15723 * @param idMsr The MSR being read or written.
15724 * @param pu64Value Pointer to the value being written or where to store the
15725 * value being read.
15726 * @param fWrite Whether this is an MSR write or read access.
15727 * @thread EMT(pVCpu)
15728 */
15729VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15730{
15731 Assert(pu64Value);
15732
15733 VBOXSTRICTRC rcStrict;
15734 if (fWrite)
15735 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15736 else
15737 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15738 Assert(!pVCpu->iem.s.cActiveMappings);
15739 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15740
15741}
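
/*
 * Usage sketch (illustrative only; the handler shell is hypothetical): how a
 * caller might act on the status codes documented above for an MSR read -
 * VINF_VMX_MODIFIES_BEHAVIOR means the value came from the virtual-APIC page,
 * VINF_VMX_INTERCEPT_NOT_ACTIVE means the x2APIC device must handle it, and a
 * failure status means #GP(0) should be raised.
 */
#if 0 /* Illustrative sketch, not compiled. */
static VBOXSTRICTRC exampleVirtApicMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, pu64Value, false /*fWrite*/);
    if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
        return VINF_SUCCESS;        /* *pu64Value was produced from the virtual-APIC page. */
    if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
        return rcStrict;            /* Caller reads the MSR from the x2APIC device instead. */
    return rcStrict;                /* Failure: caller raises #GP(0). */
}
#endif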
15742
15743
15744/**
15745 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15746 *
15747 * @returns Strict VBox status code.
15748 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15749 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15750 *
15751 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15752 * @param pExitInfo Pointer to the VM-exit information.
15753 * @param pExitEventInfo Pointer to the VM-exit event information.
15754 * @thread EMT(pVCpu)
15755 */
15756VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15757{
15758 Assert(pExitInfo);
15759 Assert(pExitEventInfo);
15760 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15761 Assert(!pVCpu->iem.s.cActiveMappings);
15762 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15763
15764}
15765
15766
15767/**
15768 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15769 * VM-exit.
15770 *
15771 * @returns Strict VBox status code.
15772 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15773 * @thread EMT(pVCpu)
15774 */
15775VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
15776{
15777 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15778 Assert(!pVCpu->iem.s.cActiveMappings);
15779 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15780}
15781
15782
15783/**
15784 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15785 *
15786 * @returns Strict VBox status code.
15787 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15788 * @thread EMT(pVCpu)
15789 */
15790VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
15791{
15792 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15793 Assert(!pVCpu->iem.s.cActiveMappings);
15794 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15795}
15796
15797
15798/**
15799 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15800 *
15801 * @returns Strict VBox status code.
15802 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15803 * @param uVector The external interrupt vector (pass 0 if the external
15804 * interrupt is still pending).
15805 * @param fIntPending Whether the external interrupt is pending or
15806 * acknowledged in the interrupt controller.
15807 * @thread EMT(pVCpu)
15808 */
15809VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
15810{
15811 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15812 Assert(!pVCpu->iem.s.cActiveMappings);
15813 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15814}
15815
15816
15817/**
15818 * Interface for HM and EM to emulate VM-exit due to exceptions.
15819 *
15820 * Exceptions include NMIs, software exceptions (those generated by INT3 or
15821 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
15822 *
15823 * @returns Strict VBox status code.
15824 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15825 * @param pExitInfo Pointer to the VM-exit information.
15826 * @param pExitEventInfo Pointer to the VM-exit event information.
15827 * @thread EMT(pVCpu)
15828 */
15829VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15830{
15831 Assert(pExitInfo);
15832 Assert(pExitEventInfo);
15833 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15834 Assert(!pVCpu->iem.s.cActiveMappings);
15835 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15836}
15837
15838
15839/**
15840 * Interface for HM and EM to emulate VM-exit due to NMIs.
15841 *
15842 * @returns Strict VBox status code.
15843 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15844 * @thread EMT(pVCpu)
15845 */
15846VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
15847{
15848 VMXVEXITINFO ExitInfo;
15849 RT_ZERO(ExitInfo);
15850 ExitInfo.uReason = VMX_EXIT_XCPT_OR_NMI;
15851
15852 VMXVEXITEVENTINFO ExitEventInfo;
15853 RT_ZERO(ExitEventInfo);
15854 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15855 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15856 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15857
15858 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15859 Assert(!pVCpu->iem.s.cActiveMappings);
15860 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15861}
15862
15863
15864/**
15865 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15866 *
15867 * @returns Strict VBox status code.
15868 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15869 * @thread EMT(pVCpu)
15870 */
15871VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
15872{
15873 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15874 Assert(!pVCpu->iem.s.cActiveMappings);
15875 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15876}
15877
15878
15879/**
15880 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15881 *
15882 * @returns Strict VBox status code.
15883 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15884 * @param uVector The SIPI vector.
15885 * @thread EMT(pVCpu)
15886 */
15887VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
15888{
15889 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
15890 Assert(!pVCpu->iem.s.cActiveMappings);
15891 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15892}
15893
15894
15895/**
15896 * Interface for HM and EM to emulate a VM-exit.
15897 *
15898 * If a specialized version of a VM-exit handler exists, that must be used instead.
15899 *
15900 * @returns Strict VBox status code.
15901 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15902 * @param uExitReason The VM-exit reason.
15903 * @param u64ExitQual The Exit qualification.
15904 * @thread EMT(pVCpu)
15905 */
15906VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
15907{
15908 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
15909 Assert(!pVCpu->iem.s.cActiveMappings);
15910 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15911}
15912
15913
15914/**
15915 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15916 *
15917 * This is meant to be used for those instructions for which VMX provides
15918 * additional decoding information beyond just the instruction length.
15919 *
15920 * @returns Strict VBox status code.
15921 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15922 * @param pExitInfo Pointer to the VM-exit information.
15923 * @thread EMT(pVCpu)
15924 */
15925VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15926{
15927 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
15928 Assert(!pVCpu->iem.s.cActiveMappings);
15929 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15930}
15931
15932
15933/**
15934 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15935 *
15936 * This is meant to be used for those instructions for which VMX provides only
15937 * the instruction length.
15938 *
15939 * @returns Strict VBox status code.
15940 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15941 * @param uExitReason The VM-exit reason.
15942 * @param cbInstr The instruction length in bytes.
15943 * @thread EMT(pVCpu)
15944 */
15945VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
15946{
15947 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
15948 Assert(!pVCpu->iem.s.cActiveMappings);
15949 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15950}
15951
15952
15953/**
15954 * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
15955 * Virtualized-EOI, TPR-below threshold).
15956 *
15957 * @returns Strict VBox status code.
15958 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15959 * @param pExitInfo Pointer to the VM-exit information.
15960 * @thread EMT(pVCpu)
15961 */
15962VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15963{
15964 Assert(pExitInfo);
15965 VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
15966 Assert(!pVCpu->iem.s.cActiveMappings);
15967 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15968}
15969
15970
15971/**
15972 * Interface for HM and EM to emulate a VM-exit due to a task switch.
15973 *
15974 * @returns Strict VBox status code.
15975 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15976 * @param pExitInfo Pointer to the VM-exit information.
15977 * @param pExitEventInfo Pointer to the VM-exit event information.
15978 * @thread EMT(pVCpu)
15979 */
15980VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15981{
15982 Assert(pExitInfo);
15983 Assert(pExitEventInfo);
15984 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
15985 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15986 Assert(!pVCpu->iem.s.cActiveMappings);
15987 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15988}
15989
15990
15991/**
15992 * Interface for HM and EM to emulate the VMREAD instruction.
15993 *
15994 * @returns Strict VBox status code.
15995 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15996 * @param pExitInfo Pointer to the VM-exit information.
15997 * @thread EMT(pVCpu)
15998 */
15999VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16000{
16001 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16002 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16003 Assert(pExitInfo);
16004
16005 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16006
16007 VBOXSTRICTRC rcStrict;
16008 uint8_t const cbInstr = pExitInfo->cbInstr;
16009 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16010 uint64_t const u64FieldEnc = fIs64BitMode
16011 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16012 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16013 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16014 {
16015 if (fIs64BitMode)
16016 {
16017 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16018 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16019 }
16020 else
16021 {
16022 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16023 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16024 }
16025 }
16026 else
16027 {
16028 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16029 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16030 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16031 }
16032 Assert(!pVCpu->iem.s.cActiveMappings);
16033 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16034}
16035
16036
16037/**
16038 * Interface for HM and EM to emulate the VMWRITE instruction.
16039 *
16040 * @returns Strict VBox status code.
16041 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16042 * @param pExitInfo Pointer to the VM-exit information.
16043 * @thread EMT(pVCpu)
16044 */
16045VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16046{
16047 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16048 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16049 Assert(pExitInfo);
16050
16051 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16052
16053 uint64_t u64Val;
16054 uint8_t iEffSeg;
16055 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16056 {
16057 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16058 iEffSeg = UINT8_MAX;
16059 }
16060 else
16061 {
16062 u64Val = pExitInfo->GCPtrEffAddr;
16063 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16064 }
16065 uint8_t const cbInstr = pExitInfo->cbInstr;
16066 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16067 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16068 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16069 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16070 Assert(!pVCpu->iem.s.cActiveMappings);
16071 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16072}
16073
16074
16075/**
16076 * Interface for HM and EM to emulate the VMPTRLD instruction.
16077 *
16078 * @returns Strict VBox status code.
16079 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16080 * @param pExitInfo Pointer to the VM-exit information.
16081 * @thread EMT(pVCpu)
16082 */
16083VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16084{
16085 Assert(pExitInfo);
16086 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16087 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16088
16089 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16090
16091 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16092 uint8_t const cbInstr = pExitInfo->cbInstr;
16093 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16094 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16095 Assert(!pVCpu->iem.s.cActiveMappings);
16096 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16097}
16098
16099
16100/**
16101 * Interface for HM and EM to emulate the VMPTRST instruction.
16102 *
16103 * @returns Strict VBox status code.
16104 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16105 * @param pExitInfo Pointer to the VM-exit information.
16106 * @thread EMT(pVCpu)
16107 */
16108VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16109{
16110 Assert(pExitInfo);
16111 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16112 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16113
16114 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16115
16116 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16117 uint8_t const cbInstr = pExitInfo->cbInstr;
16118 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16119 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16120 Assert(!pVCpu->iem.s.cActiveMappings);
16121 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16122}
16123
16124
16125/**
16126 * Interface for HM and EM to emulate the VMCLEAR instruction.
16127 *
16128 * @returns Strict VBox status code.
16129 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16130 * @param pExitInfo Pointer to the VM-exit information.
16131 * @thread EMT(pVCpu)
16132 */
16133VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16134{
16135 Assert(pExitInfo);
16136 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16137 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16138
16139 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16140
16141 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16142 uint8_t const cbInstr = pExitInfo->cbInstr;
16143 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16144 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16145 Assert(!pVCpu->iem.s.cActiveMappings);
16146 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16147}
16148
16149
16150/**
16151 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16152 *
16153 * @returns Strict VBox status code.
16154 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16155 * @param cbInstr The instruction length in bytes.
16156 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16157 * VMXINSTRID_VMRESUME).
16158 * @thread EMT(pVCpu)
16159 */
16160VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16161{
16162 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16163 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16164
16165 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16166 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16167 Assert(!pVCpu->iem.s.cActiveMappings);
16168 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16169}
16170
16171
16172/**
16173 * Interface for HM and EM to emulate the VMXON instruction.
16174 *
16175 * @returns Strict VBox status code.
16176 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16177 * @param pExitInfo Pointer to the VM-exit information.
16178 * @thread EMT(pVCpu)
16179 */
16180VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16181{
16182 Assert(pExitInfo);
16183 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16184 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16185
16186 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16187
16188 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16189 uint8_t const cbInstr = pExitInfo->cbInstr;
16190 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16191 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16192 Assert(!pVCpu->iem.s.cActiveMappings);
16193 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16194}
16195
16196
16197/**
16198 * Interface for HM and EM to emulate the VMXOFF instruction.
16199 *
16200 * @returns Strict VBox status code.
16201 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16202 * @param cbInstr The instruction length in bytes.
16203 * @thread EMT(pVCpu)
16204 */
16205VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
16206{
16207 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16208 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16209
16210 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16211 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16212 Assert(!pVCpu->iem.s.cActiveMappings);
16213 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16214}
16215
16216
16217/**
16218 * Interface for HM and EM to emulate the INVVPID instruction.
16219 *
16220 * @returns Strict VBox status code.
16221 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16222 * @param pExitInfo Pointer to the VM-exit information.
16223 * @thread EMT(pVCpu)
16224 */
16225VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16226{
16227    Assert(pExitInfo);
16228    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16229    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16230
16231 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16232
16233 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16234 uint8_t const cbInstr = pExitInfo->cbInstr;
16235 RTGCPTR const GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
16236 uint64_t const u64InvvpidType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16237 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16238 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16239 VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
16240 Assert(!pVCpu->iem.s.cActiveMappings);
16241 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16242}
16243
16244
16245/**
16246 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16247 *
16248 * @remarks The @a pvUser argument is currently unused.
16249 */
16250PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16251 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16252 PGMACCESSORIGIN enmOrigin, void *pvUser)
16253{
16254 RT_NOREF3(pvPhys, enmOrigin, pvUser);
16255
16256 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
16257 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16258 {
16259 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16260 Assert(CPUMGetGuestVmxApicAccessPageAddr(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16261
16262 /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
16263 * Currently they will go through as read accesses. */
16264 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
16265 uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
16266 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16267 if (RT_FAILURE(rcStrict))
16268 return rcStrict;
16269
16270 /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
16271 return VINF_SUCCESS;
16272 }
16273
16274 Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
16275 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16276 if (RT_FAILURE(rc))
16277 return rc;
16278
16279 /* Instruct the caller of this handler to perform the read/write as normal memory. */
16280 return VINF_PGM_HANDLER_DO_DEFAULT;
16281}
16282
16283#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16284
16285#ifdef IN_RING3
16286
16287/**
16288 * Handles the unlikely and probably fatal merge cases.
16289 *
16290 * @returns Merged status code.
16291 * @param rcStrict Current EM status code.
16292 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16293 * with @a rcStrict.
16294 * @param iMemMap The memory mapping index. For error reporting only.
16295 * @param pVCpu The cross context virtual CPU structure of the calling
16296 * thread, for error reporting only.
16297 */
16298DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16299 unsigned iMemMap, PVMCPUCC pVCpu)
16300{
16301 if (RT_FAILURE_NP(rcStrict))
16302 return rcStrict;
16303
16304 if (RT_FAILURE_NP(rcStrictCommit))
16305 return rcStrictCommit;
16306
16307 if (rcStrict == rcStrictCommit)
16308 return rcStrictCommit;
16309
16310 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16311 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16312 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16313 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16314 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16315 return VERR_IOM_FF_STATUS_IPE;
16316}
16317
16318
16319/**
16320 * Helper for IEMR3ProcessForceFlag.
16321 *
16322 * @returns Merged status code.
16323 * @param rcStrict Current EM status code.
16324 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16325 * with @a rcStrict.
16326 * @param iMemMap The memory mapping index. For error reporting only.
16327 * @param pVCpu The cross context virtual CPU structure of the calling
16328 * thread, for error reporting only.
16329 */
16330DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
16331{
16332 /* Simple. */
16333 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16334 return rcStrictCommit;
16335
16336 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16337 return rcStrict;
16338
16339 /* EM scheduling status codes. */
16340 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16341 && rcStrict <= VINF_EM_LAST))
16342 {
16343 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16344 && rcStrictCommit <= VINF_EM_LAST))
16345 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16346 }
16347
16348 /* Unlikely */
16349 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16350}
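
/*
 * Illustrative outcomes of the merge policy above (a summary sketch, not an
 * exhaustive table):
 *   - rcStrict is VINF_SUCCESS or VINF_EM_RAW_TO_R3             -> the commit status wins.
 *   - rcStrictCommit is VINF_SUCCESS                            -> the existing EM status wins.
 *   - both are EM scheduling codes (VINF_EM_FIRST..VINF_EM_LAST) -> the numerically lower,
 *     i.e. higher priority, of the two is returned.
 *   - everything else (failures, mismatches) ends up in iemR3MergeStatusSlow().
 */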
16351
16352
16353/**
16354 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16355 *
16356 * @returns Merge between @a rcStrict and what the commit operation returned.
16357 * @param pVM The cross context VM structure.
16358 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16359 * @param rcStrict The status code returned by ring-0 or raw-mode.
16360 */
16361VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
16362{
16363 /*
16364 * Reset the pending commit.
16365 */
16366 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16367 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16368 ("%#x %#x %#x\n",
16369 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16370 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16371
16372 /*
16373 * Commit the pending bounce buffers (usually just one).
16374 */
16375 unsigned cBufs = 0;
16376 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16377 while (iMemMap-- > 0)
16378 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16379 {
16380 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16381 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16382 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16383
16384 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16385 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16386 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16387
16388 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16389 {
16390 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16392 pbBuf,
16393 cbFirst,
16394 PGMACCESSORIGIN_IEM);
16395 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16396 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16397 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16398 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16399 }
16400
16401 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16402 {
16403 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16405 pbBuf + cbFirst,
16406 cbSecond,
16407 PGMACCESSORIGIN_IEM);
16408 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16409 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16410 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16411 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16412 }
16413 cBufs++;
16414 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16415 }
16416
16417 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16418 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16419 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16420 pVCpu->iem.s.cActiveMappings = 0;
16421 return rcStrict;
16422}
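
/*
 * A hypothetical caller sketch (kept under #if 0; EM's real force-flag handling
 * differs in detail): after returning to ring-3 with a pending bounce-buffer
 * commit, the force-flag handling loop would do roughly the following.
 */
#if 0
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif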
16423
16424#endif /* IN_RING3 */
16425