VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@94156

Last change on this file was revision 94156, checked in by vboxsync, 3 years ago

VMM/IEM: Try deal with basic Intel/AMD EFLAGS difference for binary and div/mul operations (intel side). bugref:9898

1/* $Id: IEMAll.cpp 94156 2022-03-10 13:59:24Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
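/*
 * Illustrative sketch only: how the logging levels above are typically
 * exercised with the double-parenthesis macros from <VBox/log.h>.  The helper
 * name and the messages are made up for the example.
 */
#if 0
static void iemExampleLogUsage(uint64_t uRip, uint8_t cbInstr)
{
    LogFlow(("iemExampleLogUsage: enter\n"));            /* Flow   : enter/exit state info. */
    Log(("IEM: raising #GP(0) at %RX64\n", uRip));       /* Level 1: major events. */
    Log4(("decode: %RX64 len=%u\n", uRip, cbInstr));     /* Level 4: decoding mnemonics w/ EIP. */
    Log8(("write: %u bytes at %RX64\n", cbInstr, uRip)); /* Level 8: memory writes. */
}
#endif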
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#include "IEMInternal.h"
111#include <VBox/vmm/vmcc.h>
112#include <VBox/log.h>
113#include <VBox/err.h>
114#include <VBox/param.h>
115#include <VBox/dis.h>
116#include <VBox/disopcode.h>
117#include <iprt/asm-math.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
208
209/**
210 * CPU exception classes.
211 */
212typedef enum IEMXCPTCLASS
213{
214 IEMXCPTCLASS_BENIGN,
215 IEMXCPTCLASS_CONTRIBUTORY,
216 IEMXCPTCLASS_PAGE_FAULT,
217 IEMXCPTCLASS_DOUBLE_FAULT
218} IEMXCPTCLASS;
219
220
221/*********************************************************************************************************************************
222* Defined Constants And Macros *
223*********************************************************************************************************************************/
224/** @def IEM_WITH_SETJMP
225 * Enables alternative status code handling using setjmps.
226 *
227 * This adds a bit of expense via the setjmp() call since it saves all the
228 * non-volatile registers. However, it eliminates return code checks and allows
229 * for more optimal return value passing (return regs instead of stack buffer).
230 */
231#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
232# define IEM_WITH_SETJMP
233#endif
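/*
 * Illustrative sketch only: the two status-handling styles IEM_WITH_SETJMP
 * selects between.  With the define, fetch/raise helpers longjmp to a buffer
 * armed by the outer loop instead of returning a status code through every
 * caller.  All names here (iemExample*) are made up; the real code keeps the
 * jump buffer in the per-CPU IEM state.
 */
#if 0
# include <setjmp.h>
static jmp_buf g_ExampleJmpBuf;

static uint8_t iemExampleFetchByteJmp(bool fFail)
{
    if (fFail)
        longjmp(g_ExampleJmpBuf, 1);    /* raise: unwinds straight to the setjmp below */
    return 0x90;                        /* NOP */
}

static int iemExampleRunOne(bool fFail)
{
    if (setjmp(g_ExampleJmpBuf) == 0)   /* saves the non-volatile registers once */
    {
        uint8_t bOpcode = iemExampleFetchByteJmp(fFail); /* no per-call status checks */
        return bOpcode;
    }
    return -1;                          /* error path: landed here via longjmp */
}
#endif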
234
235/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
236 * due to GCC lacking knowledge about the value range of a switch. */
237#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
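/*
 * Illustrative sketch only: typical use of IEM_NOT_REACHED_DEFAULT_CASE_RET in
 * an exhaustive switch (hypothetical helper, not part of IEM).
 */
#if 0
static VBOXSTRICTRC iemExampleOpSizeToBytes(IEMMODE enmOpSize, uint8_t *pcbOp)
{
    switch (enmOpSize)
    {
        case IEMMODE_16BIT: *pcbOp = 2; return VINF_SUCCESS;
        case IEMMODE_32BIT: *pcbOp = 4; return VINF_SUCCESS;
        case IEMMODE_64BIT: *pcbOp = 8; return VINF_SUCCESS;
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* asserts and returns VERR_IPE_NOT_REACHED_DEFAULT_CASE */
    }
}
#endif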
238
239/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
241
242/**
243 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
244 * occasion.
245 */
246#ifdef LOG_ENABLED
247# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
248 do { \
249 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
250 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
251 } while (0)
252#else
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
255#endif
256
257/**
258 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
259 * occasion using the supplied logger statement.
260 *
261 * @param a_LoggerArgs What to log on failure.
262 */
263#ifdef LOG_ENABLED
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 do { \
266 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
267 /*LogFunc(a_LoggerArgs);*/ \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
269 } while (0)
270#else
271# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
272 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
273#endif
274
275/**
276 * Call an opcode decoder function.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF.
280 */
281#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
282
283/**
284 * Call a common opcode decoder function taking one extra argument.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_1.
288 */
289#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
290
291/**
292 * Call a common opcode decoder function taking two extra arguments.
293 *
294 * We're using macros for this so that adding and removing parameters can be
295 * done as we please. See FNIEMOP_DEF_2.
296 */
297#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
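/*
 * Illustrative sketch only: how the FNIEMOP_DEF / FNIEMOP_CALL macro pairs
 * above fit together.  Both decoder bodies here are made up for the example.
 */
#if 0
FNIEMOP_DEF(iemExampleOp_nop)
{
    Log4(("nop\n"));                       /* decoding mnemonic, see the logging levels above */
    return VINF_SUCCESS;                   /* real decoders advance RIP via helpers */
}

FNIEMOP_DEF_1(iemExampleOp_grp, uint8_t, bRm)
{
    RT_NOREF(bRm);
    return FNIEMOP_CALL(iemExampleOp_nop); /* pVCpu is passed along implicitly */
}
#endif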
298
299/**
300 * Check if we're currently executing in real or virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The IEM state of the current CPU.
304 */
305#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in virtual 8086 mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in long mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Check if we're currently executing in a 64-bit code segment.
325 *
326 * @returns @c true if it is, @c false if not.
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
330
331/**
332 * Check if we're currently executing in real mode.
333 *
334 * @returns @c true if it is, @c false if not.
335 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
336 */
337#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
338
339/**
340 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
341 * @returns PCCPUMFEATURES
342 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
343 */
344#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
345
346/**
347 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
348 * @returns PCCPUMFEATURES
349 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
350 */
351#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
352
353/**
354 * Evaluates to true if we're presenting an Intel CPU to the guest.
355 */
356#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
357
358/**
359 * Evaluates to true if we're presenting an AMD CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
362
363/**
364 * Check if the address is canonical.
365 */
366#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
367
368/**
369 * Gets the effective VEX.VVVV value.
370 *
371 * The 4th bit is ignored when not in 64-bit code.
372 * @returns effective V-register value.
373 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
374 */
375#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
376 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
377
378/** @def IEM_USE_UNALIGNED_DATA_ACCESS
379 * Use unaligned accesses instead of elaborate byte assembly. */
380#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
381# define IEM_USE_UNALIGNED_DATA_ACCESS
382#endif
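/*
 * Illustrative sketch only: what IEM_USE_UNALIGNED_DATA_ACCESS boils down to
 * when reading a 32-bit value out of a byte buffer (hypothetical helper; the
 * real fetch code lives further down in this file).
 */
#if 0
static uint32_t iemExampleReadU32(uint8_t const *pb)
{
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    return *(uint32_t const *)pb;          /* x86/AMD64 handle unaligned loads fine */
# else
    return (uint32_t)pb[0]                 /* little-endian byte assembly */
         | ((uint32_t)pb[1] << 8)
         | ((uint32_t)pb[2] << 16)
         | ((uint32_t)pb[3] << 24);
# endif
}
#endif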
383
384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
385
386/**
387 * Check if the guest has entered VMX root operation.
388 */
389# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
390
391/**
392 * Check if the guest has entered VMX non-root operation.
393 */
394# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if the nested-guest has the given Pin-based VM-execution control set.
398 */
399# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
400 (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
401
402/**
403 * Check if the nested-guest has the given Processor-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
406 (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
407
408/**
409 * Check if the nested-guest has the given Secondary Processor-based VM-execution
410 * control set.
411 */
412# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
413 (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
414
415/**
416 * Invokes the VMX VM-exit handler for an instruction intercept.
417 */
418# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
419 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept where the
423 * instruction provides additional VM-exit information.
424 */
425# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
426 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
427
428/**
429 * Invokes the VMX VM-exit handler for a task switch.
430 */
431# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
432 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for MWAIT.
436 */
437# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
438 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for EPT faults.
442 */
443# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
444 do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for a triple fault.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
450 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
463# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
464
465#endif
466
467#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
468/**
469 * Check if an SVM control/instruction intercept is set.
470 */
471# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
472 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
473
474/**
475 * Check if an SVM read CRx intercept is set.
476 */
477# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
478 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
479
480/**
481 * Check if an SVM write CRx intercept is set.
482 */
483# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
484 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
485
486/**
487 * Check if an SVM read DRx intercept is set.
488 */
489# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
490 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
491
492/**
493 * Check if an SVM write DRx intercept is set.
494 */
495# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
496 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
497
498/**
499 * Check if an SVM exception intercept is set.
500 */
501# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
502 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
503
504/**
505 * Invokes the SVM \#VMEXIT handler for the nested-guest.
506 */
507# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
508 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
509
510/**
511 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
512 * corresponding decode assist information.
513 */
514# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
515 do \
516 { \
517 uint64_t uExitInfo1; \
518 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
519 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
520 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
521 else \
522 uExitInfo1 = 0; \
523 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
524 } while (0)
525
526/** Checks and handles the SVM nested-guest instruction intercept and updates
527 * the NRIP if needed.
528 */
529# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
530 do \
531 { \
532 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
533 { \
534 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
535 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
536 } \
537 } while (0)
538
539/** Checks and handles SVM nested-guest CR0 read intercept. */
540# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
541 do \
542 { \
543 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
544 { /* probably likely */ } \
545 else \
546 { \
547 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
548 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
549 } \
550 } while (0)
551
552/**
553 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
554 */
555# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
556 do { \
557 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
558 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
559 } while (0)
560
561#else
562# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
563# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
565# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
567# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
568# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
570# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
572# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
573
574#endif
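/*
 * Illustrative sketch only: how an instruction emulation might combine the SVM
 * intercept helpers above.  The emulation body is omitted and the intercept /
 * exit-code constants are assumed to come from hm_svm.h.
 */
#if 0
static VBOXSTRICTRC iemExampleCImpl_wbinvd(PVMCPUCC pVCpu)
{
    IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD,
                                  0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    /* ... emulate the instruction ... */
    return VINF_SUCCESS;
}
#endif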
575
576
577/*********************************************************************************************************************************
578* Global Variables *
579*********************************************************************************************************************************/
580extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
581
582
583/** Function table for the ADD instruction. */
584IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
585{
586 iemAImpl_add_u8, iemAImpl_add_u8_locked,
587 iemAImpl_add_u16, iemAImpl_add_u16_locked,
588 iemAImpl_add_u32, iemAImpl_add_u32_locked,
589 iemAImpl_add_u64, iemAImpl_add_u64_locked
590};
591
592/** Function table for the ADC instruction. */
593IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
594{
595 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
596 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
597 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
598 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
599};
600
601/** Function table for the SUB instruction. */
602IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
603{
604 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
605 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
606 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
607 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
608};
609
610/** Function table for the SBB instruction. */
611IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
612{
613 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
614 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
615 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
616 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
617};
618
619/** Function table for the OR instruction. */
620IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
621{
622 iemAImpl_or_u8, iemAImpl_or_u8_locked,
623 iemAImpl_or_u16, iemAImpl_or_u16_locked,
624 iemAImpl_or_u32, iemAImpl_or_u32_locked,
625 iemAImpl_or_u64, iemAImpl_or_u64_locked
626};
627
628/** Function table for the XOR instruction. */
629IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
630{
631 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
632 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
633 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
634 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
635};
636
637/** Function table for the AND instruction. */
638IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
639{
640 iemAImpl_and_u8, iemAImpl_and_u8_locked,
641 iemAImpl_and_u16, iemAImpl_and_u16_locked,
642 iemAImpl_and_u32, iemAImpl_and_u32_locked,
643 iemAImpl_and_u64, iemAImpl_and_u64_locked
644};
645
646/** Function table for the CMP instruction.
647 * @remarks Making operand order ASSUMPTIONS.
648 */
649IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
650{
651 iemAImpl_cmp_u8, NULL,
652 iemAImpl_cmp_u16, NULL,
653 iemAImpl_cmp_u32, NULL,
654 iemAImpl_cmp_u64, NULL
655};
656
657/** Function table for the TEST instruction.
658 * @remarks Making operand order ASSUMPTIONS.
659 */
660IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
661{
662 iemAImpl_test_u8, NULL,
663 iemAImpl_test_u16, NULL,
664 iemAImpl_test_u32, NULL,
665 iemAImpl_test_u64, NULL
666};
667
668/** Function table for the BT instruction. */
669IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
670{
671 NULL, NULL,
672 iemAImpl_bt_u16, NULL,
673 iemAImpl_bt_u32, NULL,
674 iemAImpl_bt_u64, NULL
675};
676
677/** Function table for the BTC instruction. */
678IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
679{
680 NULL, NULL,
681 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
682 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
683 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
684};
685
686/** Function table for the BTR instruction. */
687IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
688{
689 NULL, NULL,
690 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
691 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
692 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
693};
694
695/** Function table for the BTS instruction. */
696IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
697{
698 NULL, NULL,
699 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
700 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
701 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
702};
703
704/** Function table for the BSF instruction. */
705IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
706{
707 NULL, NULL,
708 iemAImpl_bsf_u16, NULL,
709 iemAImpl_bsf_u32, NULL,
710 iemAImpl_bsf_u64, NULL
711};
712
713/** Function table for the BSF instruction, AMD EFLAGS variant. */
714IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_amd =
715{
716 NULL, NULL,
717 iemAImpl_bsf_u16_amd, NULL,
718 iemAImpl_bsf_u32_amd, NULL,
719 iemAImpl_bsf_u64_amd, NULL
720};
721
722/** Function table for the BSF instruction, Intel EFLAGS variant. */
723IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_intel =
724{
725 NULL, NULL,
726 iemAImpl_bsf_u16_intel, NULL,
727 iemAImpl_bsf_u32_intel, NULL,
728 iemAImpl_bsf_u64_intel, NULL
729};
730
731/** EFLAGS variation selection table for the BSF instruction. */
732IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsf_eflags[] =
733{
734 &g_iemAImpl_bsf,
735 &g_iemAImpl_bsf_intel,
736 &g_iemAImpl_bsf_amd,
737 &g_iemAImpl_bsf,
738};
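/*
 * Illustrative sketch only: one way a 4-entry EFLAGS variation table like the
 * one above could be resolved per guest vendor.  The real code caches a
 * flavour index in the IEM state; the helper name here is made up.
 */
#if 0
static PCIEMOPBINSIZES iemExamplePickBsfImpl(PVMCPUCC pVCpu)
{
    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
        return &g_iemAImpl_bsf_intel;   /* Intel-style EFLAGS results */
    if (IEM_IS_GUEST_CPU_AMD(pVCpu))
        return &g_iemAImpl_bsf_amd;     /* AMD-style EFLAGS results */
    return &g_iemAImpl_bsf;             /* otherwise the host-native behaviour */
}
#endif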
739
740/** Function table for the BSR instruction. */
741IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
742{
743 NULL, NULL,
744 iemAImpl_bsr_u16, NULL,
745 iemAImpl_bsr_u32, NULL,
746 iemAImpl_bsr_u64, NULL
747};
748
749/** Function table for the BSR instruction, AMD EFLAGS variant. */
750IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_amd =
751{
752 NULL, NULL,
753 iemAImpl_bsr_u16_amd, NULL,
754 iemAImpl_bsr_u32_amd, NULL,
755 iemAImpl_bsr_u64_amd, NULL
756};
757
758/** Function table for the BSR instruction, Intel EFLAGS variant. */
759IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_intel =
760{
761 NULL, NULL,
762 iemAImpl_bsr_u16_intel, NULL,
763 iemAImpl_bsr_u32_intel, NULL,
764 iemAImpl_bsr_u64_intel, NULL
765};
766
767/** EFLAGS variation selection table for the BSR instruction. */
768IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsr_eflags[] =
769{
770 &g_iemAImpl_bsr,
771 &g_iemAImpl_bsr_intel,
772 &g_iemAImpl_bsr_amd,
773 &g_iemAImpl_bsr,
774};
775
776/** Function table for the IMUL instruction. */
777IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
778{
779 NULL, NULL,
780 iemAImpl_imul_two_u16, NULL,
781 iemAImpl_imul_two_u32, NULL,
782 iemAImpl_imul_two_u64, NULL
783};
784
785/** Function table for the IMUL instruction, AMD EFLAGS variant. */
786IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_amd =
787{
788 NULL, NULL,
789 iemAImpl_imul_two_u16_amd, NULL,
790 iemAImpl_imul_two_u32_amd, NULL,
791 iemAImpl_imul_two_u64_amd, NULL
792};
793
794/** Function table for the IMUL instruction, Intel EFLAGS variant. */
795IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_intel =
796{
797 NULL, NULL,
798 iemAImpl_imul_two_u16_intel, NULL,
799 iemAImpl_imul_two_u32_intel, NULL,
800 iemAImpl_imul_two_u64_intel, NULL
801};
802
803/** EFLAGS variation selection table for the IMUL instruction. */
804IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_imul_two_eflags[] =
805{
806 &g_iemAImpl_imul_two,
807 &g_iemAImpl_imul_two_intel,
808 &g_iemAImpl_imul_two_amd,
809 &g_iemAImpl_imul_two,
810};
811
812/** EFLAGS variation selection table for the 16-bit IMUL instruction. */
813IEM_STATIC PFNIEMAIMPLBINU16 const g_iemAImpl_imul_two_u16_eflags[] =
814{
815 iemAImpl_imul_two_u16,
816 iemAImpl_imul_two_u16_intel,
817 iemAImpl_imul_two_u16_amd,
818 iemAImpl_imul_two_u16,
819};
820
821/** EFLAGS variation selection table for the 32-bit IMUL instruction. */
822IEM_STATIC PFNIEMAIMPLBINU32 const g_iemAImpl_imul_two_u32_eflags[] =
823{
824 iemAImpl_imul_two_u32,
825 iemAImpl_imul_two_u32_intel,
826 iemAImpl_imul_two_u32_amd,
827 iemAImpl_imul_two_u32,
828};
829
830/** EFLAGS variation selection table for the 64-bit IMUL instruction. */
831IEM_STATIC PFNIEMAIMPLBINU64 const g_iemAImpl_imul_two_u64_eflags[] =
832{
833 iemAImpl_imul_two_u64,
834 iemAImpl_imul_two_u64_intel,
835 iemAImpl_imul_two_u64_amd,
836 iemAImpl_imul_two_u64,
837};
838
839/** Group 1 /r lookup table. */
840IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
841{
842 &g_iemAImpl_add,
843 &g_iemAImpl_or,
844 &g_iemAImpl_adc,
845 &g_iemAImpl_sbb,
846 &g_iemAImpl_and,
847 &g_iemAImpl_sub,
848 &g_iemAImpl_xor,
849 &g_iemAImpl_cmp
850};
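/*
 * Illustrative sketch only: the group 1 table above is indexed by the reg
 * field of the ModR/M byte (bits 5:3), which is how opcodes 0x80..0x83 select
 * ADD/OR/ADC/SBB/AND/SUB/XOR/CMP.  Hypothetical helper name.
 */
#if 0
static PCIEMOPBINSIZES iemExampleGrp1Lookup(uint8_t bRm)
{
    return g_apIemImplGrp1[(bRm >> 3) & 7]; /* /0 .. /7 */
}
#endif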
851
852/** Function table for the INC instruction. */
853IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
854{
855 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
856 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
857 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
858 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
859};
860
861/** Function table for the DEC instruction. */
862IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
863{
864 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
865 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
866 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
867 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
868};
869
870/** Function table for the NEG instruction. */
871IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
872{
873 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
874 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
875 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
876 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
877};
878
879/** Function table for the NOT instruction. */
880IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
881{
882 iemAImpl_not_u8, iemAImpl_not_u8_locked,
883 iemAImpl_not_u16, iemAImpl_not_u16_locked,
884 iemAImpl_not_u32, iemAImpl_not_u32_locked,
885 iemAImpl_not_u64, iemAImpl_not_u64_locked
886};
887
888
889/** Function table for the ROL instruction. */
890IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
891{
892 iemAImpl_rol_u8,
893 iemAImpl_rol_u16,
894 iemAImpl_rol_u32,
895 iemAImpl_rol_u64
896};
897
898/** Function table for the ROR instruction. */
899IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
900{
901 iemAImpl_ror_u8,
902 iemAImpl_ror_u16,
903 iemAImpl_ror_u32,
904 iemAImpl_ror_u64
905};
906
907/** Function table for the RCL instruction. */
908IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
909{
910 iemAImpl_rcl_u8,
911 iemAImpl_rcl_u16,
912 iemAImpl_rcl_u32,
913 iemAImpl_rcl_u64
914};
915
916/** Function table for the RCR instruction. */
917IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
918{
919 iemAImpl_rcr_u8,
920 iemAImpl_rcr_u16,
921 iemAImpl_rcr_u32,
922 iemAImpl_rcr_u64
923};
924
925/** Function table for the SHL instruction. */
926IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
927{
928 iemAImpl_shl_u8,
929 iemAImpl_shl_u16,
930 iemAImpl_shl_u32,
931 iemAImpl_shl_u64
932};
933
934/** Function table for the SHR instruction. */
935IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
936{
937 iemAImpl_shr_u8,
938 iemAImpl_shr_u16,
939 iemAImpl_shr_u32,
940 iemAImpl_shr_u64
941};
942
943/** Function table for the SAR instruction. */
944IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
945{
946 iemAImpl_sar_u8,
947 iemAImpl_sar_u16,
948 iemAImpl_sar_u32,
949 iemAImpl_sar_u64
950};
951
952
953/** Function table for the MUL instruction. */
954IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
955{
956 iemAImpl_mul_u8,
957 iemAImpl_mul_u16,
958 iemAImpl_mul_u32,
959 iemAImpl_mul_u64
960};
961
962/** Function table for the MUL instruction, AMD EFLAGS variation. */
963IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_amd =
964{
965 iemAImpl_mul_u8_amd,
966 iemAImpl_mul_u16_amd,
967 iemAImpl_mul_u32_amd,
968 iemAImpl_mul_u64_amd
969};
970
971/** Function table for the MUL instruction, Intel EFLAGS variation. */
972IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_intel =
973{
974 iemAImpl_mul_u8_intel,
975 iemAImpl_mul_u16_intel,
976 iemAImpl_mul_u32_intel,
977 iemAImpl_mul_u64_intel
978};
979
980/** EFLAGS variation selection table for the MUL instruction. */
981IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_mul_eflags[] =
982{
983 &g_iemAImpl_mul,
984 &g_iemAImpl_mul_intel,
985 &g_iemAImpl_mul_amd,
986 &g_iemAImpl_mul,
987};
988
989/** EFLAGS variation selection table for the 8-bit MUL instruction. */
990IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_mul_u8_eflags[] =
991{
992 iemAImpl_mul_u8,
993 iemAImpl_mul_u8_intel,
994 iemAImpl_mul_u8_amd,
995 iemAImpl_mul_u8
996};
997
998
999/** Function table for the IMUL instruction working implicitly on rAX. */
1000IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
1001{
1002 iemAImpl_imul_u8,
1003 iemAImpl_imul_u16,
1004 iemAImpl_imul_u32,
1005 iemAImpl_imul_u64
1006};
1007
1008/** Function table for the IMUL instruction working implicitly on rAX, AMD EFLAGS variation. */
1009IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_amd =
1010{
1011 iemAImpl_imul_u8_amd,
1012 iemAImpl_imul_u16_amd,
1013 iemAImpl_imul_u32_amd,
1014 iemAImpl_imul_u64_amd
1015};
1016
1017/** Function table for the IMUL instruction working implicitly on rAX, Intel EFLAGS variation. */
1018IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_intel =
1019{
1020 iemAImpl_imul_u8_intel,
1021 iemAImpl_imul_u16_intel,
1022 iemAImpl_imul_u32_intel,
1023 iemAImpl_imul_u64_intel
1024};
1025
1026/** EFLAGS variation selection table for the IMUL instruction. */
1027IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_imul_eflags[] =
1028{
1029 &g_iemAImpl_imul,
1030 &g_iemAImpl_imul_intel,
1031 &g_iemAImpl_imul_amd,
1032 &g_iemAImpl_imul,
1033};
1034
1035/** EFLAGS variation selection table for the 8-bit IMUL instruction. */
1036IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_imul_u8_eflags[] =
1037{
1038 iemAImpl_imul_u8,
1039 iemAImpl_imul_u8_intel,
1040 iemAImpl_imul_u8_amd,
1041 iemAImpl_imul_u8
1042};
1043
1044
1045/** Function table for the DIV instruction. */
1046IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
1047{
1048 iemAImpl_div_u8,
1049 iemAImpl_div_u16,
1050 iemAImpl_div_u32,
1051 iemAImpl_div_u64
1052};
1053
1054/** Function table for the DIV instruction, AMD EFLAGS variation. */
1055IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_amd =
1056{
1057 iemAImpl_div_u8_amd,
1058 iemAImpl_div_u16_amd,
1059 iemAImpl_div_u32_amd,
1060 iemAImpl_div_u64_amd
1061};
1062
1063/** Function table for the DIV instruction, Intel EFLAGS variation. */
1064IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_intel =
1065{
1066 iemAImpl_div_u8_intel,
1067 iemAImpl_div_u16_intel,
1068 iemAImpl_div_u32_intel,
1069 iemAImpl_div_u64_intel
1070};
1071
1072/** EFLAGS variation selection table for the DIV instruction. */
1073IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_div_eflags[] =
1074{
1075 &g_iemAImpl_div,
1076 &g_iemAImpl_div_intel,
1077 &g_iemAImpl_div_amd,
1078 &g_iemAImpl_div,
1079};
1080
1081/** EFLAGS variation selection table for the 8-bit DIV instruction. */
1082IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_div_u8_eflags[] =
1083{
1084 iemAImpl_div_u8,
1085 iemAImpl_div_u8_intel,
1086 iemAImpl_div_u8_amd,
1087 iemAImpl_div_u8
1088};
1089
1090
1091/** Function table for the IDIV instruction. */
1092IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
1093{
1094 iemAImpl_idiv_u8,
1095 iemAImpl_idiv_u16,
1096 iemAImpl_idiv_u32,
1097 iemAImpl_idiv_u64
1098};
1099
1100/** Function table for the IDIV instruction, AMD EFLAGS variation. */
1101IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_amd =
1102{
1103 iemAImpl_idiv_u8_amd,
1104 iemAImpl_idiv_u16_amd,
1105 iemAImpl_idiv_u32_amd,
1106 iemAImpl_idiv_u64_amd
1107};
1108
1109/** Function table for the IDIV instruction, Intel EFLAGS variation. */
1110IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_intel =
1111{
1112 iemAImpl_idiv_u8_intel,
1113 iemAImpl_idiv_u16_intel,
1114 iemAImpl_idiv_u32_intel,
1115 iemAImpl_idiv_u64_intel
1116};
1117
1118/** EFLAGS variation selection table for the IDIV instruction. */
1119IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_idiv_eflags[] =
1120{
1121 &g_iemAImpl_idiv,
1122 &g_iemAImpl_idiv_intel,
1123 &g_iemAImpl_idiv_amd,
1124 &g_iemAImpl_idiv,
1125};
1126
1127/** EFLAGS variation selection table for the 8-bit IDIV instruction. */
1128IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_idiv_u8_eflags[] =
1129{
1130 iemAImpl_idiv_u8,
1131 iemAImpl_idiv_u8_intel,
1132 iemAImpl_idiv_u8_amd,
1133 iemAImpl_idiv_u8
1134};
1135
1136
1137/** Function table for the SHLD instruction */
1138IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
1139{
1140 iemAImpl_shld_u16,
1141 iemAImpl_shld_u32,
1142 iemAImpl_shld_u64,
1143};
1144
1145/** Function table for the SHRD instruction */
1146IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
1147{
1148 iemAImpl_shrd_u16,
1149 iemAImpl_shrd_u32,
1150 iemAImpl_shrd_u64,
1151};
1152
1153
1154/** Function table for the PUNPCKLBW instruction */
1155IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
1156/** Function table for the PUNPCKLWD instruction */
1157IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
1158/** Function table for the PUNPCKLDQ instruction */
1159IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
1160/** Function table for the PUNPCKLQDQ instruction */
1161IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
1162
1163/** Function table for the PUNPCKHBW instruction */
1164IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
1165/** Function table for the PUNPCKHWD instruction */
1166IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
1167/** Function table for the PUNPCKHDQ instruction */
1168IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
1169/** Function table for the PUNPCKHQDQ instruction */
1170IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
1171
1172/** Function table for the PXOR instruction */
1173IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
1174/** Function table for the PCMPEQB instruction */
1175IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
1176/** Function table for the PCMPEQW instruction */
1177IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
1178/** Function table for the PCMPEQD instruction */
1179IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
1180
1181
1182#if defined(IEM_LOG_MEMORY_WRITES)
1183/** What IEM just wrote. */
1184uint8_t g_abIemWrote[256];
1185/** How much IEM just wrote. */
1186size_t g_cbIemWrote;
1187#endif
1188
1189
1190/*********************************************************************************************************************************
1191* Internal Functions *
1192*********************************************************************************************************************************/
1193IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
1194IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
1195IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
1196IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
1197/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
1198IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
1199IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
1200IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
1201IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
1202IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
1203IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
1204IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
1205IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
1206IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
1207IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
1208IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
1209IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
1210#ifdef IEM_WITH_SETJMP
1211DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
1212DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
1213DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
1214DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
1215DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
1216#endif
1217
1218IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
1219IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
1220IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1221IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1222IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1223IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1224IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1225IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1226IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1227IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
1228IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
1229IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
1230IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
1231IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
1232IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
1233IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
1234DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
1235DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
1236
1237#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1238IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
1239IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
1240IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
1241IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
1242IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
1243IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
1244IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
1245IEM_STATIC VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr);
1246#endif
1247
1248#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1249IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
1250IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
1251#endif
1252
1253
1254/**
1255 * Sets the pass up status.
1256 *
1257 * @returns VINF_SUCCESS.
1258 * @param pVCpu The cross context virtual CPU structure of the
1259 * calling thread.
1260 * @param rcPassUp The pass up status. Must be informational.
1261 * VINF_SUCCESS is not allowed.
1262 */
1263IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
1264{
1265 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1266
1267 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1268 if (rcOldPassUp == VINF_SUCCESS)
1269 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1270 /* If both are EM scheduling codes, use EM priority rules. */
1271 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1272 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1273 {
1274 if (rcPassUp < rcOldPassUp)
1275 {
1276 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1277 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1278 }
1279 else
1280 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1281 }
1282 /* Override EM scheduling with specific status code. */
1283 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1284 {
1285 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1286 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1287 }
1288 /* Don't override specific status code, first come first served. */
1289 else
1290 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1291 return VINF_SUCCESS;
1292}
1293
1294
1295/**
1296 * Calculates the CPU mode.
1297 *
1298 * This is mainly for updating IEMCPU::enmCpuMode.
1299 *
1300 * @returns CPU mode.
1301 * @param pVCpu The cross context virtual CPU structure of the
1302 * calling thread.
1303 */
1304DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
1305{
1306 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1307 return IEMMODE_64BIT;
1308 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1309 return IEMMODE_32BIT;
1310 return IEMMODE_16BIT;
1311}
1312
1313
1314/**
1315 * Initializes the execution state.
1316 *
1317 * @param pVCpu The cross context virtual CPU structure of the
1318 * calling thread.
1319 * @param fBypassHandlers Whether to bypass access handlers.
1320 *
1321 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1322 * side-effects in strict builds.
1323 */
1324DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
1325{
1326 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1327 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1328 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1329 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1330 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1331 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1332 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1333 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1334 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1335 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1336
1337 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1338 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1339#ifdef VBOX_STRICT
1340 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1341 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1342 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1343 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1344 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1345 pVCpu->iem.s.uRexReg = 127;
1346 pVCpu->iem.s.uRexB = 127;
1347 pVCpu->iem.s.offModRm = 127;
1348 pVCpu->iem.s.uRexIndex = 127;
1349 pVCpu->iem.s.iEffSeg = 127;
1350 pVCpu->iem.s.idxPrefix = 127;
1351 pVCpu->iem.s.uVex3rdReg = 127;
1352 pVCpu->iem.s.uVexLength = 127;
1353 pVCpu->iem.s.fEvexStuff = 127;
1354 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1355# ifdef IEM_WITH_CODE_TLB
1356 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1357 pVCpu->iem.s.pbInstrBuf = NULL;
1358 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1359 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1360 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1361 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1362# else
1363 pVCpu->iem.s.offOpcode = 127;
1364 pVCpu->iem.s.cbOpcode = 127;
1365# endif
1366#endif
1367
1368 pVCpu->iem.s.cActiveMappings = 0;
1369 pVCpu->iem.s.iNextMapping = 0;
1370 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1371 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1372#if 0
1373#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1374 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1375 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1376 {
1377 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1378 Assert(pVmcs);
1379 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1380 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1381 {
1382 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1383 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1384 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1385 AssertRC(rc);
1386 }
1387 }
1388#endif
1389#endif
1390}
1391
1392#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1393/**
1394 * Performs a minimal reinitialization of the execution state.
1395 *
1396 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1397 * 'world-switch' types operations on the CPU. Currently only nested
1398 * hardware-virtualization uses it.
1399 *
1400 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1401 */
1402IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
1403{
1404 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1405 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1406
1407 pVCpu->iem.s.uCpl = uCpl;
1408 pVCpu->iem.s.enmCpuMode = enmMode;
1409 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1410 pVCpu->iem.s.enmEffAddrMode = enmMode;
1411 if (enmMode != IEMMODE_64BIT)
1412 {
1413 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1414 pVCpu->iem.s.enmEffOpSize = enmMode;
1415 }
1416 else
1417 {
1418 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1419 pVCpu->iem.s.enmEffOpSize = enmMode;
1420 }
1421 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1422#ifndef IEM_WITH_CODE_TLB
1423 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1424 pVCpu->iem.s.offOpcode = 0;
1425 pVCpu->iem.s.cbOpcode = 0;
1426#endif
1427 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1428}
1429#endif
1430
1431/**
1432 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1433 *
1434 * @param pVCpu The cross context virtual CPU structure of the
1435 * calling thread.
1436 */
1437DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
1438{
1439 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1440#ifdef VBOX_STRICT
1441# ifdef IEM_WITH_CODE_TLB
1442 NOREF(pVCpu);
1443# else
1444 pVCpu->iem.s.cbOpcode = 0;
1445# endif
1446#else
1447 NOREF(pVCpu);
1448#endif
1449}
1450
1451
1452/**
1453 * Initializes the decoder state.
1454 *
1455 * iemReInitDecoder is mostly a copy of this function.
1456 *
1457 * @param pVCpu The cross context virtual CPU structure of the
1458 * calling thread.
1459 * @param fBypassHandlers Whether to bypass access handlers.
1460 * @param fDisregardLock Whether to disregard the LOCK prefix.
1461 */
1462DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1463{
1464 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1465 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1466 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1467 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1468 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1469 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1470 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1471 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1472 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1473 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1474
1475 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1476 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1477 pVCpu->iem.s.enmCpuMode = enmMode;
1478 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1479 pVCpu->iem.s.enmEffAddrMode = enmMode;
1480 if (enmMode != IEMMODE_64BIT)
1481 {
1482 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1483 pVCpu->iem.s.enmEffOpSize = enmMode;
1484 }
1485 else
1486 {
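        /* Note: in 64-bit mode only the address size defaults to 64 bits; the
           default operand size stays 32 bits and 64-bit operands are selected
           per instruction (REX.W or opcodes defaulting to 64-bit operands). */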
1487 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1488 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1489 }
1490 pVCpu->iem.s.fPrefixes = 0;
1491 pVCpu->iem.s.uRexReg = 0;
1492 pVCpu->iem.s.uRexB = 0;
1493 pVCpu->iem.s.uRexIndex = 0;
1494 pVCpu->iem.s.idxPrefix = 0;
1495 pVCpu->iem.s.uVex3rdReg = 0;
1496 pVCpu->iem.s.uVexLength = 0;
1497 pVCpu->iem.s.fEvexStuff = 0;
1498 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1499#ifdef IEM_WITH_CODE_TLB
1500 pVCpu->iem.s.pbInstrBuf = NULL;
1501 pVCpu->iem.s.offInstrNextByte = 0;
1502 pVCpu->iem.s.offCurInstrStart = 0;
1503# ifdef VBOX_STRICT
1504 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1505 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1506 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1507# endif
1508#else
1509 pVCpu->iem.s.offOpcode = 0;
1510 pVCpu->iem.s.cbOpcode = 0;
1511#endif
1512 pVCpu->iem.s.offModRm = 0;
1513 pVCpu->iem.s.cActiveMappings = 0;
1514 pVCpu->iem.s.iNextMapping = 0;
1515 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1516 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1517 pVCpu->iem.s.fDisregardLock = fDisregardLock;
1518
1519#ifdef DBGFTRACE_ENABLED
1520 switch (enmMode)
1521 {
1522 case IEMMODE_64BIT:
1523 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1524 break;
1525 case IEMMODE_32BIT:
1526 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1527 break;
1528 case IEMMODE_16BIT:
1529 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1530 break;
1531 }
1532#endif
1533}
1534
1535
1536/**
1537 * Reinitializes the decoder state for the 2nd and subsequent loops of IEMExecLots.
1538 *
1539 * This is mostly a copy of iemInitDecoder.
1540 *
1541 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1542 */
1543DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
1544{
1545 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1546 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1547 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1548 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1549 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1550 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1551 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1552 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1553 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1554
1555 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1556 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1557 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1558 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1559 pVCpu->iem.s.enmEffAddrMode = enmMode;
1560 if (enmMode != IEMMODE_64BIT)
1561 {
1562 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1563 pVCpu->iem.s.enmEffOpSize = enmMode;
1564 }
1565 else
1566 {
1567 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1568 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1569 }
1570 pVCpu->iem.s.fPrefixes = 0;
1571 pVCpu->iem.s.uRexReg = 0;
1572 pVCpu->iem.s.uRexB = 0;
1573 pVCpu->iem.s.uRexIndex = 0;
1574 pVCpu->iem.s.idxPrefix = 0;
1575 pVCpu->iem.s.uVex3rdReg = 0;
1576 pVCpu->iem.s.uVexLength = 0;
1577 pVCpu->iem.s.fEvexStuff = 0;
1578 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1579#ifdef IEM_WITH_CODE_TLB
1580 if (pVCpu->iem.s.pbInstrBuf)
1581 {
1582 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1583 - pVCpu->iem.s.uInstrBufPc;
1584 if (off < pVCpu->iem.s.cbInstrBufTotal)
1585 {
1586 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1587 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1588 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1589 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1590 else
1591 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1592 }
1593 else
1594 {
1595 pVCpu->iem.s.pbInstrBuf = NULL;
1596 pVCpu->iem.s.offInstrNextByte = 0;
1597 pVCpu->iem.s.offCurInstrStart = 0;
1598 pVCpu->iem.s.cbInstrBuf = 0;
1599 pVCpu->iem.s.cbInstrBufTotal = 0;
1600 }
1601 }
1602 else
1603 {
1604 pVCpu->iem.s.offInstrNextByte = 0;
1605 pVCpu->iem.s.offCurInstrStart = 0;
1606 pVCpu->iem.s.cbInstrBuf = 0;
1607 pVCpu->iem.s.cbInstrBufTotal = 0;
1608 }
1609#else
1610 pVCpu->iem.s.cbOpcode = 0;
1611 pVCpu->iem.s.offOpcode = 0;
1612#endif
1613 pVCpu->iem.s.offModRm = 0;
1614 Assert(pVCpu->iem.s.cActiveMappings == 0);
1615 pVCpu->iem.s.iNextMapping = 0;
1616 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1617 Assert(pVCpu->iem.s.fBypassHandlers == false);
1618
1619#ifdef DBGFTRACE_ENABLED
1620 switch (enmMode)
1621 {
1622 case IEMMODE_64BIT:
1623 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1624 break;
1625 case IEMMODE_32BIT:
1626 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1627 break;
1628 case IEMMODE_16BIT:
1629 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1630 break;
1631 }
1632#endif
1633}
1634
1635
1636
1637/**
1638 * Prefetch opcodes the first time when starting executing.
1639 *
1640 * @returns Strict VBox status code.
1641 * @param pVCpu The cross context virtual CPU structure of the
1642 * calling thread.
1643 * @param fBypassHandlers Whether to bypass access handlers.
1644 * @param fDisregardLock Whether to disregard LOCK prefixes.
1645 *
1646 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
1647 * store them as such.
1648 */
1649IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1650{
1651 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
1652
1653#ifdef IEM_WITH_CODE_TLB
1654 /** @todo Do ITLB lookup here. */
1655
1656#else /* !IEM_WITH_CODE_TLB */
1657
1658 /*
1659 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1660 *
1661 * First translate CS:rIP to a physical address.
1662 */
1663 uint32_t cbToTryRead;
1664 RTGCPTR GCPtrPC;
1665 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1666 {
1667 cbToTryRead = GUEST_PAGE_SIZE;
1668 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1669 if (IEM_IS_CANONICAL(GCPtrPC))
1670 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
1671 else
1672 return iemRaiseGeneralProtectionFault0(pVCpu);
1673 }
1674 else
1675 {
1676 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1677 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1678 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1679 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1680 else
1681 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1682 if (cbToTryRead) { /* likely */ }
1683 else /* overflowed */
1684 {
1685 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1686 cbToTryRead = UINT32_MAX;
1687 }
1688 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1689 Assert(GCPtrPC <= UINT32_MAX);
1690 }
1691
1692 PGMPTWALK Walk;
1693 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
1694 if (RT_SUCCESS(rc))
1695 Assert(Walk.fSucceeded); /* probable. */
1696 else
1697 {
1698 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1699#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1700 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1701 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1702#endif
1703 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1704 }
1705 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1706 else
1707 {
1708 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1709#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1710 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1711 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1712#endif
1713 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1714 }
1715 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1716 else
1717 {
1718 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1719#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1720 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1721 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1722#endif
1723 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1724 }
1725 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
1726 /** @todo Check reserved bits and such stuff. PGM is better at doing
1727 * that, so do it when implementing the guest virtual address
1728 * TLB... */
1729
1730 /*
1731 * Read the bytes at this address.
1732 */
1733 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
1734 if (cbToTryRead > cbLeftOnPage)
1735 cbToTryRead = cbLeftOnPage;
1736 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1737 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1738
1739 if (!pVCpu->iem.s.fBypassHandlers)
1740 {
1741 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1742 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1743 { /* likely */ }
1744 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1745 {
1746 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1747 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1748 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1749 }
1750 else
1751 {
1752 Log((RT_SUCCESS(rcStrict)
1753 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1754 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1755 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1756 return rcStrict;
1757 }
1758 }
1759 else
1760 {
1761 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1762 if (RT_SUCCESS(rc))
1763 { /* likely */ }
1764 else
1765 {
1766 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1767 GCPtrPC, GCPhys, cbToTryRead, rc));
1768 return rc;
1769 }
1770 }
1771 pVCpu->iem.s.cbOpcode = cbToTryRead;
1772#endif /* !IEM_WITH_CODE_TLB */
1773 return VINF_SUCCESS;
1774}
1775
1776
1777/**
1778 * Invalidates the IEM TLBs.
1779 *
1780 * This is called internally as well as by PGM when moving GC mappings.
1781 *
1782 *
1783 * @param pVCpu The cross context virtual CPU structure of the calling
1784 * thread.
1785 * @param fVmm Set when PGM calls us with a remapping.
1786 */
1787VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
1788{
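    /*
     * Invalidation works by bumping the TLB revision rather than scrubbing all
     * entries: each entry tag is (GCPtrPage >> X86_PAGE_SHIFT) | uTlbRevision,
     * so a new revision instantly makes every existing tag stale.  Only when
     * the revision counter wraps to zero (very rare) do we zero the tags.
     */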
1789#ifdef IEM_WITH_CODE_TLB
1790 pVCpu->iem.s.cbInstrBufTotal = 0;
1791 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1792 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1793 { /* very likely */ }
1794 else
1795 {
1796 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1797 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1798 while (i-- > 0)
1799 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1800 }
1801#endif
1802
1803#ifdef IEM_WITH_DATA_TLB
1804 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1805 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1806 { /* very likely */ }
1807 else
1808 {
1809 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1810 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1811 while (i-- > 0)
1812 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1813 }
1814#endif
1815 NOREF(pVCpu); NOREF(fVmm);
1816}
1817
1818
1819/**
1820 * Invalidates a page in the TLBs.
1821 *
1822 * @param pVCpu The cross context virtual CPU structure of the calling
1823 * thread.
1824 * @param GCPtr The address of the page to invalidate
1825 */
1826VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1827{
1828#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1829 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1830 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1831 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
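    /* Both TLBs are direct mapped with 256 entries: the low 8 bits of the page
       frame number select the entry, and the full shifted address or'ed with
       the current revision forms the tag compared below. */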
1832 uintptr_t idx = (uint8_t)GCPtr;
1833
1834# ifdef IEM_WITH_CODE_TLB
1835 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1836 {
1837 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1838 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1839 pVCpu->iem.s.cbInstrBufTotal = 0;
1840 }
1841# endif
1842
1843# ifdef IEM_WITH_DATA_TLB
1844 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1845 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1846# endif
1847#else
1848 NOREF(pVCpu); NOREF(GCPtr);
1849#endif
1850}
1851
1852
1853/**
1854 * Invalidates the host physical aspects of the IEM TLBs.
1855 *
1856 * This is called internally as well as by PGM when moving GC mappings.
1857 *
1858 * @param pVCpu The cross context virtual CPU structure of the calling
1859 * thread.
1860 */
1861VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1862{
1863#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1864 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
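    /* The physical revision is kept in the high bits of each entry's
       fFlagsAndPhysRev (IEMTLBE_F_PHYS_REV).  Bumping it makes the
       "physical info still valid" check in the opcode fetcher fail, forcing a
       fresh PGMPhysIemGCPhys2PtrNoLock lookup the next time the entry is used;
       only the rare wrap-around case scrubs the fields explicitly. */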
1865
1866# ifdef IEM_WITH_CODE_TLB
1867 pVCpu->iem.s.cbInstrBufTotal = 0;
1868# endif
1869 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1870 if (uTlbPhysRev != 0)
1871 {
1872 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1873 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1874 }
1875 else
1876 {
1877 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1878 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1879
1880 unsigned i;
1881# ifdef IEM_WITH_CODE_TLB
1882 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1883 while (i-- > 0)
1884 {
1885 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1886 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1887 }
1888# endif
1889# ifdef IEM_WITH_DATA_TLB
1890 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1891 while (i-- > 0)
1892 {
1893 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1894 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1895 }
1896# endif
1897 }
1898#else
1899 NOREF(pVCpu);
1900#endif
1901}
1902
1903
1904/**
1905 * Invalidates the host physical aspects of the IEM TLBs on all VCPUs.
1906 *
1907 * This is called internally as well as by PGM when moving GC mappings.
1908 *
1909 * @param pVM The cross context VM structure.
1910 *
1911 * @remarks Caller holds the PGM lock.
1912 */
1913VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1914{
1915 RT_NOREF_PV(pVM);
1916}
1917
1918#ifdef IEM_WITH_CODE_TLB
1919
1920/**
1921 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1922 * longjmp'ing on failure.
1923 *
1924 * We end up here for a number of reasons:
1925 * - pbInstrBuf isn't yet initialized.
1926 * - Advancing beyond the buffer boundary (e.g. cross page).
1927 * - Advancing beyond the CS segment limit.
1928 * - Fetching from non-mappable page (e.g. MMIO).
1929 *
1930 * @param pVCpu The cross context virtual CPU structure of the
1931 * calling thread.
1932 * @param pvDst Where to return the bytes.
1933 * @param cbDst Number of bytes to read.
1934 *
1935 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1936 */
1937IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
1938{
1939#ifdef IN_RING3
1940 for (;;)
1941 {
1942 Assert(cbDst <= 8);
1943 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1944
1945 /*
1946 * We might have a partial buffer match, deal with that first to make the
1947 * rest simpler. This is the first part of the cross page/buffer case.
1948 */
1949 if (pVCpu->iem.s.pbInstrBuf != NULL)
1950 {
1951 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1952 {
1953 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1954 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1955 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1956
1957 cbDst -= cbCopy;
1958 pvDst = (uint8_t *)pvDst + cbCopy;
1959 offBuf += cbCopy;
1960 pVCpu->iem.s.offInstrNextByte = offBuf;
1961 }
1962 }
1963
1964 /*
1965 * Check segment limit, figuring how much we're allowed to access at this point.
1966 *
1967 * We will fault immediately if RIP is past the segment limit / in non-canonical
1968 * territory. If we do continue, there are one or more bytes to read before we
1969 * end up in trouble and we need to do that first before faulting.
1970 */
1971 RTGCPTR GCPtrFirst;
1972 uint32_t cbMaxRead;
1973 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1974 {
1975 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1976 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1977 { /* likely */ }
1978 else
1979 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1980 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1981 }
1982 else
1983 {
1984 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1985 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1986 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1987 { /* likely */ }
1988 else
1989 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1990 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1991 if (cbMaxRead != 0)
1992 { /* likely */ }
1993 else
1994 {
1995 /* Overflowed because address is 0 and limit is max. */
1996 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1997 cbMaxRead = X86_PAGE_SIZE;
1998 }
1999 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
2000 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
2001 if (cbMaxRead2 < cbMaxRead)
2002 cbMaxRead = cbMaxRead2;
2003 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
2004 }
2005
2006 /*
2007 * Get the TLB entry for this piece of code.
2008 */
2009 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
2010 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
2011 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
2012 if (pTlbe->uTag == uTag)
2013 {
2014 /* likely when executing lots of code, otherwise unlikely */
2015# ifdef VBOX_WITH_STATISTICS
2016 pVCpu->iem.s.CodeTlb.cTlbHits++;
2017# endif
2018 }
2019 else
2020 {
2021 pVCpu->iem.s.CodeTlb.cTlbMisses++;
2022 PGMPTWALK Walk;
2023 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
2024 if (RT_FAILURE(rc))
2025 {
2026#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2027 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
2028 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
2029#endif
2030 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
2031 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
2032 }
2033
2034 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
2035 Assert(Walk.fSucceeded);
2036 pTlbe->uTag = uTag;
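        /* The page-table user/write/dirty bits are stored inverted (a set flag
           means the PTE bit was clear), and the NX bit is shifted down into
           bit 0 (IEMTLBE_F_PT_NO_EXEC, see the AssertCompile above). */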
2037 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D))
2038 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
2039 pTlbe->GCPhys = Walk.GCPhys;
2040 pTlbe->pbMappingR3 = NULL;
2041 }
2042
2043 /*
2044 * Check TLB page table level access flags.
2045 */
2046 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
2047 {
2048 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
2049 {
2050 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
2051 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2052 }
2053 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2054 {
2055 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
2056 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2057 }
2058 }
2059
2060 /*
2061 * Look up the physical page info if necessary.
2062 */
2063 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
2064 { /* not necessary */ }
2065 else
2066 {
2067 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
2068 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
2069 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
2070 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
2071 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
2072 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
2073 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
2074 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
2075 }
2076
2077# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
2078 /*
2079 * Try do a direct read using the pbMappingR3 pointer.
2080 */
2081 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
2082 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
2083 {
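            /* The instruction buffer window is anchored at the start of the
               current instruction and never extends more than 15 bytes past it
               (the maximum x86 instruction length).  If we get here in the
               middle of an instruction, part of it lives on the previous page
               and offCurInstrStart goes negative relative to the new buffer. */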
2084 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
2085 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
2086 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
2087 {
2088 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
2089 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
2090 }
2091 else
2092 {
2093 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
2094 Assert(cbInstr < cbMaxRead);
2095 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
2096 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
2097 }
2098 if (cbDst <= cbMaxRead)
2099 {
2100 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
2101 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
2102 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
2103 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
2104 return;
2105 }
2106 pVCpu->iem.s.pbInstrBuf = NULL;
2107
2108 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
2109 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
2110 }
2111 else
2112# endif
2113#if 0
2114 /*
2115 * If there is no special read handling, we can read a bit more and
2116 * put it in the prefetch buffer.
2117 */
2118 if ( cbDst < cbMaxRead
2119 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
2120 {
2121 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
2122 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
2123 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2124 { /* likely */ }
2125 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2126 {
2127 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2128 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2129 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2130 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
2131 }
2132 else
2133 {
2134 Log((RT_SUCCESS(rcStrict)
2135 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2136 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2137 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2138 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2139 }
2140 }
2141 /*
2142 * Special read handling, so only read exactly what's needed.
2143 * This is a highly unlikely scenario.
2144 */
2145 else
2146#endif
2147 {
2148 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
2149 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
2150 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
2151 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
2152 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2153 { /* likely */ }
2154 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2155 {
2156 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2157 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
2158 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2159 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
2160 }
2161 else
2162 {
2163 Log((RT_SUCCESS(rcStrict)
2164 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2165 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2166 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
2167 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2168 }
2169 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
2170 if (cbToRead == cbDst)
2171 return;
2172 }
2173
2174 /*
2175 * More to read, loop.
2176 */
2177 cbDst -= cbMaxRead;
2178 pvDst = (uint8_t *)pvDst + cbMaxRead;
2179 }
2180#else
2181 RT_NOREF(pvDst, cbDst);
2182 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
2183#endif
2184}
2185
2186#else
2187
2188/**
2189 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
2190 * exception if it fails.
2191 *
2192 * @returns Strict VBox status code.
2193 * @param pVCpu The cross context virtual CPU structure of the
2194 * calling thread.
2195 * @param cbMin The minimum number of bytes relative to offOpcode
2196 * that must be read.
2197 */
2198IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
2199{
2200 /*
2201 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2202 *
2203 * First translate CS:rIP to a physical address.
2204 */
2205 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2206 uint32_t cbToTryRead;
2207 RTGCPTR GCPtrNext;
2208 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2209 {
2210 cbToTryRead = GUEST_PAGE_SIZE;
2211 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2212 if (!IEM_IS_CANONICAL(GCPtrNext))
2213 return iemRaiseGeneralProtectionFault0(pVCpu);
2214 }
2215 else
2216 {
2217 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2218 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2219 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2220 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2221 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2222 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2223 if (!cbToTryRead) /* overflowed */
2224 {
2225 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2226 cbToTryRead = UINT32_MAX;
2227 /** @todo check out wrapping around the code segment. */
2228 }
2229 if (cbToTryRead < cbMin - cbLeft)
2230 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2231 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2232 }
2233
2234 /* Only read up to the end of the page, and make sure we don't read more
2235 than the opcode buffer can hold. */
2236 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
2237 if (cbToTryRead > cbLeftOnPage)
2238 cbToTryRead = cbLeftOnPage;
2239 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2240 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2241/** @todo r=bird: Convert assertion into undefined opcode exception? */
2242 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2243
2244 PGMPTWALK Walk;
2245 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
2246 if (RT_FAILURE(rc))
2247 {
2248 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2249#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2250 if (Walk.fFailed & PGM_WALKFAIL_EPT)
2251 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
2252#endif
2253 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2254 }
2255 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2256 {
2257 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2258#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2259 if (Walk.fFailed & PGM_WALKFAIL_EPT)
2260 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
2261#endif
2262 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2263 }
2264 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2265 {
2266 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2267#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2268 if (Walk.fFailed & PGM_WALKFAIL_EPT)
2269 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
2270#endif
2271 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2272 }
2273 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
2274 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2275 /** @todo Check reserved bits and such stuff. PGM is better at doing
2276 * that, so do it when implementing the guest virtual address
2277 * TLB... */
2278
2279 /*
2280 * Read the bytes at this address.
2281 *
2282 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2283 * and since PATM should only patch the start of an instruction there
2284 * should be no need to check again here.
2285 */
2286 if (!pVCpu->iem.s.fBypassHandlers)
2287 {
2288 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2289 cbToTryRead, PGMACCESSORIGIN_IEM);
2290 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2291 { /* likely */ }
2292 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2293 {
2294 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2295 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2296 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2297 }
2298 else
2299 {
2300 Log((RT_SUCCESS(rcStrict)
2301 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2302 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2303 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2304 return rcStrict;
2305 }
2306 }
2307 else
2308 {
2309 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2310 if (RT_SUCCESS(rc))
2311 { /* likely */ }
2312 else
2313 {
2314 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2315 return rc;
2316 }
2317 }
2318 pVCpu->iem.s.cbOpcode += cbToTryRead;
2319 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2320
2321 return VINF_SUCCESS;
2322}
2323
2324#endif /* !IEM_WITH_CODE_TLB */
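/*
 * From here on the opcode accessors come in two flavours selected at compile
 * time: without IEM_WITH_SETJMP each helper returns a VBOXSTRICTRC and the
 * IEM_OPCODE_GET_NEXT_* macros bail out of the caller on failure, while with
 * IEM_WITH_SETJMP the helpers return the fetched value directly and report
 * failures by longjmp'ing to the setjmp frame established by the execution
 * loop.
 */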
2325#ifndef IEM_WITH_SETJMP
2326
2327/**
2328 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2329 *
2330 * @returns Strict VBox status code.
2331 * @param pVCpu The cross context virtual CPU structure of the
2332 * calling thread.
2333 * @param pb Where to return the opcode byte.
2334 */
2335DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2336{
2337 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2338 if (rcStrict == VINF_SUCCESS)
2339 {
2340 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2341 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2342 pVCpu->iem.s.offOpcode = offOpcode + 1;
2343 }
2344 else
2345 *pb = 0;
2346 return rcStrict;
2347}
2348
2349
2350/**
2351 * Fetches the next opcode byte.
2352 *
2353 * @returns Strict VBox status code.
2354 * @param pVCpu The cross context virtual CPU structure of the
2355 * calling thread.
2356 * @param pu8 Where to return the opcode byte.
2357 */
2358DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2359{
2360 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2361 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2362 {
2363 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2364 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2365 return VINF_SUCCESS;
2366 }
2367 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2368}
2369
2370#else /* IEM_WITH_SETJMP */
2371
2372/**
2373 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2374 *
2375 * @returns The opcode byte.
2376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2377 */
2378DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2379{
2380# ifdef IEM_WITH_CODE_TLB
2381 uint8_t u8;
2382 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2383 return u8;
2384# else
2385 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2386 if (rcStrict == VINF_SUCCESS)
2387 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2388 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2389# endif
2390}
2391
2392
2393/**
2394 * Fetches the next opcode byte, longjmp on error.
2395 *
2396 * @returns The opcode byte.
2397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2398 */
2399DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2400{
2401# ifdef IEM_WITH_CODE_TLB
2402 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2403 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2404 if (RT_LIKELY( pbBuf != NULL
2405 && offBuf < pVCpu->iem.s.cbInstrBuf))
2406 {
2407 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2408 return pbBuf[offBuf];
2409 }
2410# else
2411 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2412 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2413 {
2414 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2415 return pVCpu->iem.s.abOpcode[offOpcode];
2416 }
2417# endif
2418 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2419}
2420
2421#endif /* IEM_WITH_SETJMP */
2422
2423/**
2424 * Fetches the next opcode byte, returns automatically on failure.
2425 *
2426 * @param a_pu8 Where to return the opcode byte.
2427 * @remark Implicitly references pVCpu.
2428 */
2429#ifndef IEM_WITH_SETJMP
2430# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2431 do \
2432 { \
2433 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2434 if (rcStrict2 == VINF_SUCCESS) \
2435 { /* likely */ } \
2436 else \
2437 return rcStrict2; \
2438 } while (0)
2439#else
2440# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2441#endif /* IEM_WITH_SETJMP */
2442
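/*
 * A minimal usage sketch (hypothetical decoder stub, not part of the real
 * opcode tables): depending on the build the macro either 'return's a strict
 * status code from the caller or longjmps, so it may only be used in decoder
 * functions that return VBOXSTRICTRC and run under the IEM status/longjmp
 * machinery.
 *
 *      // Hypothetical example, assuming the usual decoder calling convention:
 *      // IEM_STATIC VBOXSTRICTRC iemOpExampleStub(PVMCPUCC pVCpu)
 *      // {
 *      //     uint8_t bImm;
 *      //     IEM_OPCODE_GET_NEXT_U8(&bImm);  // fetch failure => return/longjmp
 *      //     // ... act on bImm ...
 *      //     return VINF_SUCCESS;
 *      // }
 */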
2443
2444#ifndef IEM_WITH_SETJMP
2445/**
2446 * Fetches the next signed byte from the opcode stream.
2447 *
2448 * @returns Strict VBox status code.
2449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2450 * @param pi8 Where to return the signed byte.
2451 */
2452DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2453{
2454 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2455}
2456#endif /* !IEM_WITH_SETJMP */
2457
2458
2459/**
2460 * Fetches the next signed byte from the opcode stream, returning automatically
2461 * on failure.
2462 *
2463 * @param a_pi8 Where to return the signed byte.
2464 * @remark Implicitly references pVCpu.
2465 */
2466#ifndef IEM_WITH_SETJMP
2467# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2468 do \
2469 { \
2470 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2471 if (rcStrict2 != VINF_SUCCESS) \
2472 return rcStrict2; \
2473 } while (0)
2474#else /* IEM_WITH_SETJMP */
2475# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2476
2477#endif /* IEM_WITH_SETJMP */
2478
2479#ifndef IEM_WITH_SETJMP
2480
2481/**
2482 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2483 *
2484 * @returns Strict VBox status code.
2485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2486 * @param pu16 Where to return the opcode byte, sign-extended to a word.
2487 */
2488DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2489{
2490 uint8_t u8;
2491 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2492 if (rcStrict == VINF_SUCCESS)
2493 *pu16 = (int8_t)u8;
2494 return rcStrict;
2495}
2496
2497
2498/**
2499 * Fetches the next signed byte from the opcode stream, extending it to
2500 * unsigned 16-bit.
2501 *
2502 * @returns Strict VBox status code.
2503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2504 * @param pu16 Where to return the unsigned word.
2505 */
2506DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2507{
2508 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2509 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2510 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2511
2512 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2513 pVCpu->iem.s.offOpcode = offOpcode + 1;
2514 return VINF_SUCCESS;
2515}
2516
2517#endif /* !IEM_WITH_SETJMP */
2518
2519/**
2520 * Fetches the next signed byte from the opcode stream and sign-extends it to
2521 * a word, returning automatically on failure.
2522 *
2523 * @param a_pu16 Where to return the word.
2524 * @remark Implicitly references pVCpu.
2525 */
2526#ifndef IEM_WITH_SETJMP
2527# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2528 do \
2529 { \
2530 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2531 if (rcStrict2 != VINF_SUCCESS) \
2532 return rcStrict2; \
2533 } while (0)
2534#else
2535# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2536#endif
2537
2538#ifndef IEM_WITH_SETJMP
2539
2540/**
2541 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2542 *
2543 * @returns Strict VBox status code.
2544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2545 * @param pu32 Where to return the opcode dword.
2546 */
2547DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2548{
2549 uint8_t u8;
2550 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2551 if (rcStrict == VINF_SUCCESS)
2552 *pu32 = (int8_t)u8;
2553 return rcStrict;
2554}
2555
2556
2557/**
2558 * Fetches the next signed byte from the opcode stream, extending it to
2559 * unsigned 32-bit.
2560 *
2561 * @returns Strict VBox status code.
2562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2563 * @param pu32 Where to return the unsigned dword.
2564 */
2565DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2566{
2567 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2568 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2569 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2570
2571 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2572 pVCpu->iem.s.offOpcode = offOpcode + 1;
2573 return VINF_SUCCESS;
2574}
2575
2576#endif /* !IEM_WITH_SETJMP */
2577
2578/**
2579 * Fetches the next signed byte from the opcode stream and sign-extends it to
2580 * a double word, returning automatically on failure.
2581 *
2582 * @param a_pu32 Where to return the double word.
2583 * @remark Implicitly references pVCpu.
2584 */
2585#ifndef IEM_WITH_SETJMP
2586#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2587 do \
2588 { \
2589 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2590 if (rcStrict2 != VINF_SUCCESS) \
2591 return rcStrict2; \
2592 } while (0)
2593#else
2594# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2595#endif
2596
2597#ifndef IEM_WITH_SETJMP
2598
2599/**
2600 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2601 *
2602 * @returns Strict VBox status code.
2603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2604 * @param pu64 Where to return the opcode qword.
2605 */
2606DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2607{
2608 uint8_t u8;
2609 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2610 if (rcStrict == VINF_SUCCESS)
2611 *pu64 = (int8_t)u8;
2612 return rcStrict;
2613}
2614
2615
2616/**
2617 * Fetches the next signed byte from the opcode stream, extending it to
2618 * unsigned 64-bit.
2619 *
2620 * @returns Strict VBox status code.
2621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2622 * @param pu64 Where to return the unsigned qword.
2623 */
2624DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2625{
2626 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2627 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2628 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2629
2630 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2631 pVCpu->iem.s.offOpcode = offOpcode + 1;
2632 return VINF_SUCCESS;
2633}
2634
2635#endif /* !IEM_WITH_SETJMP */
2636
2637
2638/**
2639 * Fetches the next signed byte from the opcode stream and sign-extends it to
2640 * a quad word, returning automatically on failure.
2641 *
2642 * @param a_pu64 Where to return the quad word.
2643 * @remark Implicitly references pVCpu.
2644 */
2645#ifndef IEM_WITH_SETJMP
2646# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2647 do \
2648 { \
2649 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2650 if (rcStrict2 != VINF_SUCCESS) \
2651 return rcStrict2; \
2652 } while (0)
2653#else
2654# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2655#endif
2656
2657
2658#ifndef IEM_WITH_SETJMP
2659/**
2660 * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset.
2661 *
2662 * @returns Strict VBox status code.
2663 * @param pVCpu The cross context virtual CPU structure of the
2664 * calling thread.
2665 * @param pu8 Where to return the opcode byte.
2666 */
2667DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2668{
2669 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2670 pVCpu->iem.s.offModRm = offOpcode;
2671 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2672 {
2673 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2674 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2675 return VINF_SUCCESS;
2676 }
2677 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2678}
2679#else /* IEM_WITH_SETJMP */
2680/**
2681 * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset; longjmps on error.
2682 *
2683 * @returns The opcode byte.
2684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2685 */
2686DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2687{
2688# ifdef IEM_WITH_CODE_TLB
2689 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2690 pVCpu->iem.s.offModRm = offBuf;
2691 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2692 if (RT_LIKELY( pbBuf != NULL
2693 && offBuf < pVCpu->iem.s.cbInstrBuf))
2694 {
2695 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2696 return pbBuf[offBuf];
2697 }
2698# else
2699 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2700 pVCpu->iem.s.offModRm = offOpcode;
2701 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2702 {
2703 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2704 return pVCpu->iem.s.abOpcode[offOpcode];
2705 }
2706# endif
2707 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2708}
2709#endif /* IEM_WITH_SETJMP */
2710
2711/**
2712 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2713 * on failure.
2714 *
2715 * Will note down the position of the ModR/M byte for VT-x exits.
2716 *
2717 * @param a_pbRm Where to return the RM opcode byte.
2718 * @remark Implicitly references pVCpu.
2719 */
2720#ifndef IEM_WITH_SETJMP
2721# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2722 do \
2723 { \
2724 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2725 if (rcStrict2 == VINF_SUCCESS) \
2726 { /* likely */ } \
2727 else \
2728 return rcStrict2; \
2729 } while (0)
2730#else
2731# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2732#endif /* IEM_WITH_SETJMP */
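/*
 * A minimal usage sketch (hypothetical instruction stub): the ModR/M byte is
 * fetched with this macro so that its offset lands in offModRm for nested
 * VT-x instruction-information decoding, and the mod field then selects
 * between the register and memory forms.
 *
 *      // Hypothetical example:
 *      // uint8_t bRm;
 *      // IEM_OPCODE_GET_NEXT_RM(&bRm);
 *      // if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
 *      //     { ... register operand ... }
 *      // else
 *      //     { ... memory operand, decode the effective address ... }
 */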
2733
2734
2735#ifndef IEM_WITH_SETJMP
2736
2737/**
2738 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2739 *
2740 * @returns Strict VBox status code.
2741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2742 * @param pu16 Where to return the opcode word.
2743 */
2744DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2745{
2746 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2747 if (rcStrict == VINF_SUCCESS)
2748 {
2749 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2750# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2751 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2752# else
2753 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2754# endif
2755 pVCpu->iem.s.offOpcode = offOpcode + 2;
2756 }
2757 else
2758 *pu16 = 0;
2759 return rcStrict;
2760}
2761
2762
2763/**
2764 * Fetches the next opcode word.
2765 *
2766 * @returns Strict VBox status code.
2767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2768 * @param pu16 Where to return the opcode word.
2769 */
2770DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
2771{
2772 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2773 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2774 {
2775 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2776# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2777 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2778# else
2779 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2780# endif
2781 return VINF_SUCCESS;
2782 }
2783 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2784}
2785
2786#else /* IEM_WITH_SETJMP */
2787
2788/**
2789 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2790 *
2791 * @returns The opcode word.
2792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2793 */
2794DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
2795{
2796# ifdef IEM_WITH_CODE_TLB
2797 uint16_t u16;
2798 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2799 return u16;
2800# else
2801 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2802 if (rcStrict == VINF_SUCCESS)
2803 {
2804 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2805 pVCpu->iem.s.offOpcode += 2;
2806# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2807 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2808# else
2809 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2810# endif
2811 }
2812 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2813# endif
2814}
2815
2816
2817/**
2818 * Fetches the next opcode word, longjmp on error.
2819 *
2820 * @returns The opcode word.
2821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2822 */
2823DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
2824{
2825# ifdef IEM_WITH_CODE_TLB
2826 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2827 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2828 if (RT_LIKELY( pbBuf != NULL
2829 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2830 {
2831 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2832# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2833 return *(uint16_t const *)&pbBuf[offBuf];
2834# else
2835 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2836# endif
2837 }
2838# else
2839 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2840 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2841 {
2842 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2843# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2844 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2845# else
2846 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2847# endif
2848 }
2849# endif
2850 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2851}
2852
2853#endif /* IEM_WITH_SETJMP */
2854
2855
2856/**
2857 * Fetches the next opcode word, returns automatically on failure.
2858 *
2859 * @param a_pu16 Where to return the opcode word.
2860 * @remark Implicitly references pVCpu.
2861 */
2862#ifndef IEM_WITH_SETJMP
2863# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2864 do \
2865 { \
2866 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2867 if (rcStrict2 != VINF_SUCCESS) \
2868 return rcStrict2; \
2869 } while (0)
2870#else
2871# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2872#endif
2873
2874#ifndef IEM_WITH_SETJMP
2875
2876/**
2877 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2878 *
2879 * @returns Strict VBox status code.
2880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2881 * @param pu32 Where to return the opcode double word.
2882 */
2883DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2884{
2885 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2886 if (rcStrict == VINF_SUCCESS)
2887 {
2888 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2889 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2890 pVCpu->iem.s.offOpcode = offOpcode + 2;
2891 }
2892 else
2893 *pu32 = 0;
2894 return rcStrict;
2895}
2896
2897
2898/**
2899 * Fetches the next opcode word, zero extending it to a double word.
2900 *
2901 * @returns Strict VBox status code.
2902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2903 * @param pu32 Where to return the opcode double word.
2904 */
2905DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2906{
2907 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2908 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2909 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2910
2911 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2912 pVCpu->iem.s.offOpcode = offOpcode + 2;
2913 return VINF_SUCCESS;
2914}
2915
2916#endif /* !IEM_WITH_SETJMP */
2917
2918
2919/**
2920 * Fetches the next opcode word and zero extends it to a double word, returns
2921 * automatically on failure.
2922 *
2923 * @param a_pu32 Where to return the opcode double word.
2924 * @remark Implicitly references pVCpu.
2925 */
2926#ifndef IEM_WITH_SETJMP
2927# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2928 do \
2929 { \
2930 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2931 if (rcStrict2 != VINF_SUCCESS) \
2932 return rcStrict2; \
2933 } while (0)
2934#else
2935# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2936#endif
2937
2938#ifndef IEM_WITH_SETJMP
2939
2940/**
2941 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2942 *
2943 * @returns Strict VBox status code.
2944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2945 * @param pu64 Where to return the opcode quad word.
2946 */
2947DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2948{
2949 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2950 if (rcStrict == VINF_SUCCESS)
2951 {
2952 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2953 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2954 pVCpu->iem.s.offOpcode = offOpcode + 2;
2955 }
2956 else
2957 *pu64 = 0;
2958 return rcStrict;
2959}
2960
2961
2962/**
2963 * Fetches the next opcode word, zero extending it to a quad word.
2964 *
2965 * @returns Strict VBox status code.
2966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2967 * @param pu64 Where to return the opcode quad word.
2968 */
2969DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2970{
2971 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2972 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2973 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2974
2975 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2976 pVCpu->iem.s.offOpcode = offOpcode + 2;
2977 return VINF_SUCCESS;
2978}
2979
2980#endif /* !IEM_WITH_SETJMP */
2981
2982/**
2983 * Fetches the next opcode word and zero extends it to a quad word, returns
2984 * automatically on failure.
2985 *
2986 * @param a_pu64 Where to return the opcode quad word.
2987 * @remark Implicitly references pVCpu.
2988 */
2989#ifndef IEM_WITH_SETJMP
2990# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2991 do \
2992 { \
2993 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2994 if (rcStrict2 != VINF_SUCCESS) \
2995 return rcStrict2; \
2996 } while (0)
2997#else
2998# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2999#endif
3000
3001
3002#ifndef IEM_WITH_SETJMP
3003/**
3004 * Fetches the next signed word from the opcode stream.
3005 *
3006 * @returns Strict VBox status code.
3007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3008 * @param pi16 Where to return the signed word.
3009 */
3010DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
3011{
3012 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
3013}
3014#endif /* !IEM_WITH_SETJMP */
3015
3016
3017/**
3018 * Fetches the next signed word from the opcode stream, returning automatically
3019 * on failure.
3020 *
3021 * @param a_pi16 Where to return the signed word.
3022 * @remark Implicitly references pVCpu.
3023 */
3024#ifndef IEM_WITH_SETJMP
3025# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
3026 do \
3027 { \
3028 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
3029 if (rcStrict2 != VINF_SUCCESS) \
3030 return rcStrict2; \
3031 } while (0)
3032#else
3033# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
3034#endif
3035
3036#ifndef IEM_WITH_SETJMP
3037
3038/**
3039 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
3040 *
3041 * @returns Strict VBox status code.
3042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3043 * @param pu32 Where to return the opcode dword.
3044 */
3045DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
3046{
3047 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3048 if (rcStrict == VINF_SUCCESS)
3049 {
3050 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3051# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3052 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3053# else
3054 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3055 pVCpu->iem.s.abOpcode[offOpcode + 1],
3056 pVCpu->iem.s.abOpcode[offOpcode + 2],
3057 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3058# endif
3059 pVCpu->iem.s.offOpcode = offOpcode + 4;
3060 }
3061 else
3062 *pu32 = 0;
3063 return rcStrict;
3064}
3065
3066
3067/**
3068 * Fetches the next opcode dword.
3069 *
3070 * @returns Strict VBox status code.
3071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3072 * @param pu32 Where to return the opcode double word.
3073 */
3074DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
3075{
3076 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3077 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
3078 {
3079 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
3080# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3081 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3082# else
3083 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3084 pVCpu->iem.s.abOpcode[offOpcode + 1],
3085 pVCpu->iem.s.abOpcode[offOpcode + 2],
3086 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3087# endif
3088 return VINF_SUCCESS;
3089 }
3090 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
3091}
3092
3093#else /* !IEM_WITH_SETJMP */
3094
3095/**
3096 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
3097 *
3098 * @returns The opcode dword.
3099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3100 */
3101DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
3102{
3103# ifdef IEM_WITH_CODE_TLB
3104 uint32_t u32;
3105 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
3106 return u32;
3107# else
3108 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3109 if (rcStrict == VINF_SUCCESS)
3110 {
3111 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3112 pVCpu->iem.s.offOpcode = offOpcode + 4;
3113# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3114 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3115# else
3116 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3117 pVCpu->iem.s.abOpcode[offOpcode + 1],
3118 pVCpu->iem.s.abOpcode[offOpcode + 2],
3119 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3120# endif
3121 }
3122 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3123# endif
3124}
3125
3126
3127/**
3128 * Fetches the next opcode dword, longjmp on error.
3129 *
3130 * @returns The opcode dword.
3131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3132 */
3133DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
3134{
3135# ifdef IEM_WITH_CODE_TLB
3136 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3137 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3138 if (RT_LIKELY( pbBuf != NULL
3139 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
3140 {
3141 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
3142# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3143 return *(uint32_t const *)&pbBuf[offBuf];
3144# else
3145 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
3146 pbBuf[offBuf + 1],
3147 pbBuf[offBuf + 2],
3148 pbBuf[offBuf + 3]);
3149# endif
3150 }
3151# else
3152 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3153 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
3154 {
3155 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
3156# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3157 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3158# else
3159 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3160 pVCpu->iem.s.abOpcode[offOpcode + 1],
3161 pVCpu->iem.s.abOpcode[offOpcode + 2],
3162 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3163# endif
3164 }
3165# endif
3166 return iemOpcodeGetNextU32SlowJmp(pVCpu);
3167}
3168
3169#endif /* !IEM_WITH_SETJMP */
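/*
 * Orientation note: the fetchers above exist in two build flavours.  Without
 * IEM_WITH_SETJMP each iemOpcodeGetNextXxx helper returns a VBOXSTRICTRC and the
 * corresponding IEM_OPCODE_GET_NEXT_XXX macro returns from the caller on failure,
 * whereas with IEM_WITH_SETJMP the iemOpcodeGetNextXxxJmp helpers longjmp to the
 * active jump buffer on error (going through the instruction buffer / code TLB
 * when IEM_WITH_CODE_TLB is defined) and the macros reduce to plain assignments.
 */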
3170
3171
3172/**
3173 * Fetches the next opcode dword, returns automatically on failure.
3174 *
3175 * @param a_pu32 Where to return the opcode dword.
3176 * @remark Implicitly references pVCpu.
3177 */
3178#ifndef IEM_WITH_SETJMP
3179# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
3180 do \
3181 { \
3182 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
3183 if (rcStrict2 != VINF_SUCCESS) \
3184 return rcStrict2; \
3185 } while (0)
3186#else
3187# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
3188#endif
3189
3190#ifndef IEM_WITH_SETJMP
3191
3192/**
3193 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3194 *
3195 * @returns Strict VBox status code.
3196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3197 * @param pu64 Where to return the opcode dword, zero extended to a quad word.
3198 */
3199DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3200{
3201 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3202 if (rcStrict == VINF_SUCCESS)
3203 {
3204 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3205 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3206 pVCpu->iem.s.abOpcode[offOpcode + 1],
3207 pVCpu->iem.s.abOpcode[offOpcode + 2],
3208 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3209 pVCpu->iem.s.offOpcode = offOpcode + 4;
3210 }
3211 else
3212 *pu64 = 0;
3213 return rcStrict;
3214}
3215
3216
3217/**
3218 * Fetches the next opcode dword, zero extending it to a quad word.
3219 *
3220 * @returns Strict VBox status code.
3221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3222 * @param pu64 Where to return the opcode quad word.
3223 */
3224DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3225{
3226 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3227 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3228 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3229
3230 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3231 pVCpu->iem.s.abOpcode[offOpcode + 1],
3232 pVCpu->iem.s.abOpcode[offOpcode + 2],
3233 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3234 pVCpu->iem.s.offOpcode = offOpcode + 4;
3235 return VINF_SUCCESS;
3236}
3237
3238#endif /* !IEM_WITH_SETJMP */
3239
3240
3241/**
3242 * Fetches the next opcode dword and zero extends it to a quad word, returns
3243 * automatically on failure.
3244 *
3245 * @param a_pu64 Where to return the opcode quad word.
3246 * @remark Implicitly references pVCpu.
3247 */
3248#ifndef IEM_WITH_SETJMP
3249# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3250 do \
3251 { \
3252 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3253 if (rcStrict2 != VINF_SUCCESS) \
3254 return rcStrict2; \
3255 } while (0)
3256#else
3257# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3258#endif
3259
3260
3261#ifndef IEM_WITH_SETJMP
3262/**
3263 * Fetches the next signed double word from the opcode stream.
3264 *
3265 * @returns Strict VBox status code.
3266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3267 * @param pi32 Where to return the signed double word.
3268 */
3269DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
3270{
3271 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3272}
3273#endif
3274
3275/**
3276 * Fetches the next signed double word from the opcode stream, returning
3277 * automatically on failure.
3278 *
3279 * @param a_pi32 Where to return the signed double word.
3280 * @remark Implicitly references pVCpu.
3281 */
3282#ifndef IEM_WITH_SETJMP
3283# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3284 do \
3285 { \
3286 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3287 if (rcStrict2 != VINF_SUCCESS) \
3288 return rcStrict2; \
3289 } while (0)
3290#else
3291# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3292#endif
3293
3294#ifndef IEM_WITH_SETJMP
3295
3296/**
3297 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3298 *
3299 * @returns Strict VBox status code.
3300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3301 * @param pu64 Where to return the opcode qword.
3302 */
3303DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3304{
3305 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3306 if (rcStrict == VINF_SUCCESS)
3307 {
3308 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3309 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3310 pVCpu->iem.s.abOpcode[offOpcode + 1],
3311 pVCpu->iem.s.abOpcode[offOpcode + 2],
3312 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3313 pVCpu->iem.s.offOpcode = offOpcode + 4;
3314 }
3315 else
3316 *pu64 = 0;
3317 return rcStrict;
3318}
3319
3320
3321/**
3322 * Fetches the next opcode dword, sign extending it into a quad word.
3323 *
3324 * @returns Strict VBox status code.
3325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3326 * @param pu64 Where to return the opcode quad word.
3327 */
3328DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3329{
3330 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3331 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3332 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3333
3334 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3335 pVCpu->iem.s.abOpcode[offOpcode + 1],
3336 pVCpu->iem.s.abOpcode[offOpcode + 2],
3337 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3338 *pu64 = i32;
3339 pVCpu->iem.s.offOpcode = offOpcode + 4;
3340 return VINF_SUCCESS;
3341}
3342
3343#endif /* !IEM_WITH_SETJMP */
3344
3345
3346/**
3347 * Fetches the next opcode double word and sign extends it to a quad word,
3348 * returns automatically on failure.
3349 *
3350 * @param a_pu64 Where to return the opcode quad word.
3351 * @remark Implicitly references pVCpu.
3352 */
3353#ifndef IEM_WITH_SETJMP
3354# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3355 do \
3356 { \
3357 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3358 if (rcStrict2 != VINF_SUCCESS) \
3359 return rcStrict2; \
3360 } while (0)
3361#else
3362# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3363#endif
3364
3365#ifndef IEM_WITH_SETJMP
3366
3367/**
3368 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3369 *
3370 * @returns Strict VBox status code.
3371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3372 * @param pu64 Where to return the opcode qword.
3373 */
3374DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3375{
3376 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3377 if (rcStrict == VINF_SUCCESS)
3378 {
3379 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3380# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3381 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3382# else
3383 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3384 pVCpu->iem.s.abOpcode[offOpcode + 1],
3385 pVCpu->iem.s.abOpcode[offOpcode + 2],
3386 pVCpu->iem.s.abOpcode[offOpcode + 3],
3387 pVCpu->iem.s.abOpcode[offOpcode + 4],
3388 pVCpu->iem.s.abOpcode[offOpcode + 5],
3389 pVCpu->iem.s.abOpcode[offOpcode + 6],
3390 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3391# endif
3392 pVCpu->iem.s.offOpcode = offOpcode + 8;
3393 }
3394 else
3395 *pu64 = 0;
3396 return rcStrict;
3397}
3398
3399
3400/**
3401 * Fetches the next opcode qword.
3402 *
3403 * @returns Strict VBox status code.
3404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3405 * @param pu64 Where to return the opcode qword.
3406 */
3407DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3408{
3409 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3410 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3411 {
3412# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3413 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3414# else
3415 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3416 pVCpu->iem.s.abOpcode[offOpcode + 1],
3417 pVCpu->iem.s.abOpcode[offOpcode + 2],
3418 pVCpu->iem.s.abOpcode[offOpcode + 3],
3419 pVCpu->iem.s.abOpcode[offOpcode + 4],
3420 pVCpu->iem.s.abOpcode[offOpcode + 5],
3421 pVCpu->iem.s.abOpcode[offOpcode + 6],
3422 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3423# endif
3424 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3425 return VINF_SUCCESS;
3426 }
3427 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3428}
3429
3430#else /* IEM_WITH_SETJMP */
3431
3432/**
3433 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3434 *
3435 * @returns The opcode qword.
3436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3437 */
3438DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3439{
3440# ifdef IEM_WITH_CODE_TLB
3441 uint64_t u64;
3442 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3443 return u64;
3444# else
3445 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3446 if (rcStrict == VINF_SUCCESS)
3447 {
3448 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3449 pVCpu->iem.s.offOpcode = offOpcode + 8;
3450# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3451 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3452# else
3453 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3454 pVCpu->iem.s.abOpcode[offOpcode + 1],
3455 pVCpu->iem.s.abOpcode[offOpcode + 2],
3456 pVCpu->iem.s.abOpcode[offOpcode + 3],
3457 pVCpu->iem.s.abOpcode[offOpcode + 4],
3458 pVCpu->iem.s.abOpcode[offOpcode + 5],
3459 pVCpu->iem.s.abOpcode[offOpcode + 6],
3460 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3461# endif
3462 }
3463 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3464# endif
3465}
3466
3467
3468/**
3469 * Fetches the next opcode qword, longjmp on error.
3470 *
3471 * @returns The opcode qword.
3472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3473 */
3474DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3475{
3476# ifdef IEM_WITH_CODE_TLB
3477 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3478 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3479 if (RT_LIKELY( pbBuf != NULL
3480 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3481 {
3482 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3483# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3484 return *(uint64_t const *)&pbBuf[offBuf];
3485# else
3486 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3487 pbBuf[offBuf + 1],
3488 pbBuf[offBuf + 2],
3489 pbBuf[offBuf + 3],
3490 pbBuf[offBuf + 4],
3491 pbBuf[offBuf + 5],
3492 pbBuf[offBuf + 6],
3493 pbBuf[offBuf + 7]);
3494# endif
3495 }
3496# else
3497 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3498 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3499 {
3500 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3501# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3502 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3503# else
3504 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3505 pVCpu->iem.s.abOpcode[offOpcode + 1],
3506 pVCpu->iem.s.abOpcode[offOpcode + 2],
3507 pVCpu->iem.s.abOpcode[offOpcode + 3],
3508 pVCpu->iem.s.abOpcode[offOpcode + 4],
3509 pVCpu->iem.s.abOpcode[offOpcode + 5],
3510 pVCpu->iem.s.abOpcode[offOpcode + 6],
3511 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3512# endif
3513 }
3514# endif
3515 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3516}
3517
3518#endif /* IEM_WITH_SETJMP */
3519
3520/**
3521 * Fetches the next opcode quad word, returns automatically on failure.
3522 *
3523 * @param a_pu64 Where to return the opcode quad word.
3524 * @remark Implicitly references pVCpu.
3525 */
3526#ifndef IEM_WITH_SETJMP
3527# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3528 do \
3529 { \
3530 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3531 if (rcStrict2 != VINF_SUCCESS) \
3532 return rcStrict2; \
3533 } while (0)
3534#else
3535# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3536#endif
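/*
 * Usage sketch (illustrative only; the variable names are hypothetical): a decoder
 * function with pVCpu in scope fetches immediates/displacements like this, relying
 * on the macros to return (or longjmp) on a failed opcode fetch:
 *
 *     uint32_t u32Imm;
 *     IEM_OPCODE_GET_NEXT_U32(&u32Imm);
 *
 *     uint64_t u64Disp;
 *     IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Disp);
 */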
3537
3538
3539/** @name Misc Worker Functions.
3540 * @{
3541 */
3542
3543/**
3544 * Gets the exception class for the specified exception vector.
3545 *
3546 * @returns The class of the specified exception.
3547 * @param uVector The exception vector.
3548 */
3549IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3550{
3551 Assert(uVector <= X86_XCPT_LAST);
3552 switch (uVector)
3553 {
3554 case X86_XCPT_DE:
3555 case X86_XCPT_TS:
3556 case X86_XCPT_NP:
3557 case X86_XCPT_SS:
3558 case X86_XCPT_GP:
3559 case X86_XCPT_SX: /* AMD only */
3560 return IEMXCPTCLASS_CONTRIBUTORY;
3561
3562 case X86_XCPT_PF:
3563 case X86_XCPT_VE: /* Intel only */
3564 return IEMXCPTCLASS_PAGE_FAULT;
3565
3566 case X86_XCPT_DF:
3567 return IEMXCPTCLASS_DOUBLE_FAULT;
3568 }
3569 return IEMXCPTCLASS_BENIGN;
3570}
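/*
 * For reference, this classification drives the double fault matrix implemented by
 * IEMEvaluateRecursiveXcpt() below (cf. the Intel SDM's conditions for generating a
 * double fault), reading "previous -> current":
 *      benign       -> anything                    : deliver the current exception normally
 *                                                    (NMI and recursive #AC get special treatment below)
 *      contributory -> contributory                : #DF
 *      page fault   -> contributory or page fault  : #DF
 *      #DF          -> contributory or page fault  : triple fault (shutdown)
 */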
3571
3572
3573/**
3574 * Evaluates how to handle an exception caused during delivery of another event
3575 * (exception / interrupt).
3576 *
3577 * @returns How to handle the recursive exception.
3578 * @param pVCpu The cross context virtual CPU structure of the
3579 * calling thread.
3580 * @param fPrevFlags The flags of the previous event.
3581 * @param uPrevVector The vector of the previous event.
3582 * @param fCurFlags The flags of the current exception.
3583 * @param uCurVector The vector of the current exception.
3584 * @param pfXcptRaiseInfo Where to store additional information about the
3585 * exception condition. Optional.
3586 */
3587VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3588 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3589{
3590 /*
3591 * Only CPU exceptions can be raised while delivering other events; software interrupt
3592 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3593 */
3594 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3595 Assert(pVCpu); RT_NOREF(pVCpu);
3596 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3597
3598 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3599 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3600 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3601 {
3602 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3603 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3604 {
3605 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3606 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3607 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3608 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3609 {
3610 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3611 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3612 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3613 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3614 uCurVector, pVCpu->cpum.GstCtx.cr2));
3615 }
3616 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3617 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3618 {
3619 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3620 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3621 }
3622 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3623 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3624 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3625 {
3626 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3627 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3628 }
3629 }
3630 else
3631 {
3632 if (uPrevVector == X86_XCPT_NMI)
3633 {
3634 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3635 if (uCurVector == X86_XCPT_PF)
3636 {
3637 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3638 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3639 }
3640 }
3641 else if ( uPrevVector == X86_XCPT_AC
3642 && uCurVector == X86_XCPT_AC)
3643 {
3644 enmRaise = IEMXCPTRAISE_CPU_HANG;
3645 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3646 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3647 }
3648 }
3649 }
3650 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3651 {
3652 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3653 if (uCurVector == X86_XCPT_PF)
3654 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3655 }
3656 else
3657 {
3658 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3659 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3660 }
3661
3662 if (pfXcptRaiseInfo)
3663 *pfXcptRaiseInfo = fRaiseInfo;
3664 return enmRaise;
3665}
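/*
 * Illustrative call (hypothetical caller, e.g. nested-guest event injection code):
 *
 *     IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *     IEMXCPTRAISE     enmRaise   = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                            IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                            IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                            &fRaiseInfo);
 *
 * A #PF raised while delivering a #PF yields IEMXCPTRAISE_DOUBLE_FAULT with the
 * IEMXCPTRAISEINFO_PF_PF bit set in fRaiseInfo.
 */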
3666
3667
3668/**
3669 * Enters the CPU shutdown state initiated by a triple fault or other
3670 * unrecoverable conditions.
3671 *
3672 * @returns Strict VBox status code.
3673 * @param pVCpu The cross context virtual CPU structure of the
3674 * calling thread.
3675 */
3676IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3677{
3678 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3679 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3680
3681 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3682 {
3683 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3684 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3685 }
3686
3687 RT_NOREF(pVCpu);
3688 return VINF_EM_TRIPLE_FAULT;
3689}
3690
3691
3692/**
3693 * Validates a new SS segment.
3694 *
3695 * @returns VBox strict status code.
3696 * @param pVCpu The cross context virtual CPU structure of the
3697 * calling thread.
3698 * @param NewSS The new SS selector.
3699 * @param uCpl The CPL to load the stack for.
3700 * @param pDesc Where to return the descriptor.
3701 */
3702IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3703{
3704 /* Null selectors are not allowed (we're not called for dispatching
3705 interrupts with SS=0 in long mode). */
3706 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3707 {
3708 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3709 return iemRaiseTaskSwitchFault0(pVCpu);
3710 }
3711
3712 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3713 if ((NewSS & X86_SEL_RPL) != uCpl)
3714 {
3715 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3716 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3717 }
3718
3719 /*
3720 * Read the descriptor.
3721 */
3722 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3723 if (rcStrict != VINF_SUCCESS)
3724 return rcStrict;
3725
3726 /*
3727 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3728 */
3729 if (!pDesc->Legacy.Gen.u1DescType)
3730 {
3731 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3732 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3733 }
3734
3735 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3736 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3737 {
3738 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3739 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3740 }
3741 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3742 {
3743 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3744 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3745 }
3746
3747 /* Is it there? */
3748 /** @todo testcase: Is this checked before the canonical / limit check below? */
3749 if (!pDesc->Legacy.Gen.u1Present)
3750 {
3751 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3752 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3753 }
3754
3755 return VINF_SUCCESS;
3756}
3757
3758
3759/**
3760 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3761 * not (kind of obsolete now).
3762 *
3763 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3764 */
3765#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3766
3767/**
3768 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
3769 *
3770 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3771 * @param a_fEfl The new EFLAGS.
3772 */
3773#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3774
3775/** @} */
3776
3777
3778/** @name Raising Exceptions.
3779 *
3780 * @{
3781 */
3782
3783
3784/**
3785 * Loads the specified stack far pointer from the TSS.
3786 *
3787 * @returns VBox strict status code.
3788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3789 * @param uCpl The CPL to load the stack for.
3790 * @param pSelSS Where to return the new stack segment.
3791 * @param puEsp Where to return the new stack pointer.
3792 */
3793IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3794{
3795 VBOXSTRICTRC rcStrict;
3796 Assert(uCpl < 4);
3797
3798 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3799 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3800 {
3801 /*
3802 * 16-bit TSS (X86TSS16).
3803 */
3804 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3805 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3806 {
3807 uint32_t off = uCpl * 4 + 2;
3808 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3809 {
3810 /** @todo check actual access pattern here. */
3811 uint32_t u32Tmp = 0; /* gcc maybe... */
3812 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3813 if (rcStrict == VINF_SUCCESS)
3814 {
3815 *puEsp = RT_LOWORD(u32Tmp);
3816 *pSelSS = RT_HIWORD(u32Tmp);
3817 return VINF_SUCCESS;
3818 }
3819 }
3820 else
3821 {
3822 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3823 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3824 }
3825 break;
3826 }
3827
3828 /*
3829 * 32-bit TSS (X86TSS32).
3830 */
3831 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3832 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3833 {
3834 uint32_t off = uCpl * 8 + 4;
3835 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3836 {
3837 /** @todo check actual access pattern here. */
3838 uint64_t u64Tmp;
3839 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3840 if (rcStrict == VINF_SUCCESS)
3841 {
3842 *puEsp = u64Tmp & UINT32_MAX;
3843 *pSelSS = (RTSEL)(u64Tmp >> 32);
3844 return VINF_SUCCESS;
3845 }
3846 }
3847 else
3848 {
3849 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3850 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3851 }
3852 break;
3853 }
3854
3855 default:
3856 AssertFailed();
3857 rcStrict = VERR_IEM_IPE_4;
3858 break;
3859 }
3860
3861 *puEsp = 0; /* make gcc happy */
3862 *pSelSS = 0; /* make gcc happy */
3863 return rcStrict;
3864}
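/*
 * Worked example for the offset math above: in a 32-bit TSS the ring-N stack pair
 * lives at esp<N> = 4 + N*8 and ss<N> = 8 + N*8, so for uCpl=1 the single 8-byte
 * read at off = 1*8 + 4 = 0x0C covers esp1 (bits 0..31) and ss1 (bits 32..47).
 * In a 16-bit TSS the pair is sp<N> = 2 + N*4 and ss<N> = 4 + N*4, hence the
 * 4-byte read at off = uCpl*4 + 2.
 */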
3865
3866
3867/**
3868 * Loads the specified stack pointer from the 64-bit TSS.
3869 *
3870 * @returns VBox strict status code.
3871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3872 * @param uCpl The CPL to load the stack for.
3873 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3874 * @param puRsp Where to return the new stack pointer.
3875 */
3876IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3877{
3878 Assert(uCpl < 4);
3879 Assert(uIst < 8);
3880 *puRsp = 0; /* make gcc happy */
3881
3882 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3883 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3884
3885 uint32_t off;
3886 if (uIst)
3887 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3888 else
3889 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3890 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3891 {
3892 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3893 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3894 }
3895
3896 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3897}
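/*
 * Worked example: with uIst=0 the offset is RT_UOFFSETOF(X86TSS64, rsp0) + uCpl*8,
 * i.e. rsp0/rsp1/rsp2 at 0x04/0x0C/0x14; with uIst=3 it is
 * RT_UOFFSETOF(X86TSS64, ist1) + 2*8 = 0x24 + 0x10 = 0x34, the IST3 slot.
 */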
3898
3899
3900/**
3901 * Adjusts the CPU state according to the exception being raised.
3902 *
3903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3904 * @param u8Vector The exception that has been raised.
3905 */
3906DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
3907{
3908 switch (u8Vector)
3909 {
3910 case X86_XCPT_DB:
3911 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3912 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3913 break;
3914 /** @todo Read the AMD and Intel exception reference... */
3915 }
3916}
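/*
 * Background for the #DB case above: DR7.GD ("general detect") makes any debug
 * register access raise #DB, and the CPU clears GD when delivering that #DB so
 * the debug handler can touch DR0-DR7 without immediately re-faulting; clearing
 * it here mirrors that architectural behaviour.
 */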
3917
3918
3919/**
3920 * Implements exceptions and interrupts for real mode.
3921 *
3922 * @returns VBox strict status code.
3923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3924 * @param cbInstr The number of bytes to offset rIP by in the return
3925 * address.
3926 * @param u8Vector The interrupt / exception vector number.
3927 * @param fFlags The flags.
3928 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3929 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3930 */
3931IEM_STATIC VBOXSTRICTRC
3932iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
3933 uint8_t cbInstr,
3934 uint8_t u8Vector,
3935 uint32_t fFlags,
3936 uint16_t uErr,
3937 uint64_t uCr2)
3938{
3939 NOREF(uErr); NOREF(uCr2);
3940 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3941
3942 /*
3943 * Read the IDT entry.
3944 */
3945 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3946 {
3947 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3948 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3949 }
3950 RTFAR16 Idte;
3951 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3952 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3953 {
3954 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3955 return rcStrict;
3956 }
3957
3958 /*
3959 * Push the stack frame.
3960 */
3961 uint16_t *pu16Frame;
3962 uint64_t uNewRsp;
3963 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3964 if (rcStrict != VINF_SUCCESS)
3965 return rcStrict;
3966
3967 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3968#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3969 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3970 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3971 fEfl |= UINT16_C(0xf000);
3972#endif
3973 pu16Frame[2] = (uint16_t)fEfl;
3974 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3975 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3976 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3977 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3978 return rcStrict;
3979
3980 /*
3981 * Load the vector address into cs:ip and make exception specific state
3982 * adjustments.
3983 */
3984 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3985 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3986 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3987 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3988 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3989 pVCpu->cpum.GstCtx.rip = Idte.off;
3990 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3991 IEMMISC_SET_EFL(pVCpu, fEfl);
3992
3993 /** @todo do we actually do this in real mode? */
3994 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3995 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3996
3997 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3998}
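/*
 * Layout recap for the real-mode path above: the IVT entry for vector N is the
 * 4-byte far pointer at idtr.pIdt + N*4 (offset in the low word, segment in the
 * high word), and the pushed frame mirrors what IRET will pop afterwards:
 * pu16Frame[0]=IP (top of stack), [1]=CS, [2]=FLAGS.
 */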
3999
4000
4001/**
4002 * Loads a NULL data selector into a segment register when coming from V8086 mode.
4003 *
4004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4005 * @param pSReg Pointer to the segment register.
4006 */
4007IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
4008{
4009 pSReg->Sel = 0;
4010 pSReg->ValidSel = 0;
4011 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4012 {
4013 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
4014 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
4015 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
4016 }
4017 else
4018 {
4019 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
4020 /** @todo check this on AMD-V */
4021 pSReg->u64Base = 0;
4022 pSReg->u32Limit = 0;
4023 }
4024}
4025
4026
4027/**
4028 * Loads a segment selector during a task switch in V8086 mode.
4029 *
4030 * @param pSReg Pointer to the segment register.
4031 * @param uSel The selector value to load.
4032 */
4033IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
4034{
4035 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
4036 pSReg->Sel = uSel;
4037 pSReg->ValidSel = uSel;
4038 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
4039 pSReg->u64Base = uSel << 4;
4040 pSReg->u32Limit = 0xffff;
4041 pSReg->Attr.u = 0xf3;
4042}
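/*
 * Quick example: loading uSel=0x1234 here yields base 0x12340, limit 0xFFFF and
 * attributes 0xF3 (present, DPL=3, accessed read/write data), i.e. the real-mode
 * style segment semantics that V8086 mode uses.
 */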
4043
4044
4045/**
4046 * Loads a NULL data selector into a selector register, both the hidden and
4047 * visible parts, in protected mode.
4048 *
4049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4050 * @param pSReg Pointer to the segment register.
4051 * @param uRpl The RPL.
4052 */
4053IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
4054{
4055 /** @todo Testcase: write a testcase checking what happens when loading a NULL
4056 * data selector in protected mode. */
4057 pSReg->Sel = uRpl;
4058 pSReg->ValidSel = uRpl;
4059 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
4060 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4061 {
4062 /* VT-x (Intel 3960x) observed doing something like this. */
4063 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
4064 pSReg->u32Limit = UINT32_MAX;
4065 pSReg->u64Base = 0;
4066 }
4067 else
4068 {
4069 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
4070 pSReg->u32Limit = 0;
4071 pSReg->u64Base = 0;
4072 }
4073}
4074
4075
4076/**
4077 * Loads a segment selector during a task switch in protected mode.
4078 *
4079 * In this task switch scenario, we would throw \#TS exceptions rather than
4080 * \#GPs.
4081 *
4082 * @returns VBox strict status code.
4083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4084 * @param pSReg Pointer to the segment register.
4085 * @param uSel The new selector value.
4086 *
4087 * @remarks This does _not_ handle CS or SS.
4088 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
4089 */
4090IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
4091{
4092 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4093
4094 /* Null data selector. */
4095 if (!(uSel & X86_SEL_MASK_OFF_RPL))
4096 {
4097 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
4098 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
4099 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4100 return VINF_SUCCESS;
4101 }
4102
4103 /* Fetch the descriptor. */
4104 IEMSELDESC Desc;
4105 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
4106 if (rcStrict != VINF_SUCCESS)
4107 {
4108 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
4109 VBOXSTRICTRC_VAL(rcStrict)));
4110 return rcStrict;
4111 }
4112
4113 /* Must be a data segment or readable code segment. */
4114 if ( !Desc.Legacy.Gen.u1DescType
4115 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4116 {
4117 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
4118 Desc.Legacy.Gen.u4Type));
4119 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
4120 }
4121
4122 /* Check privileges for data segments and non-conforming code segments. */
4123 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4124 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4125 {
4126 /* The RPL and the new CPL must be less than or equal to the DPL. */
4127 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4128 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
4129 {
4130 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
4131 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4132 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
4133 }
4134 }
4135
4136 /* Is it there? */
4137 if (!Desc.Legacy.Gen.u1Present)
4138 {
4139 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
4140 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
4141 }
4142
4143 /* The base and limit. */
4144 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4145 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4146
4147 /*
4148 * Ok, everything checked out fine. Now set the accessed bit before
4149 * committing the result into the registers.
4150 */
4151 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4152 {
4153 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4154 if (rcStrict != VINF_SUCCESS)
4155 return rcStrict;
4156 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4157 }
4158
4159 /* Commit */
4160 pSReg->Sel = uSel;
4161 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4162 pSReg->u32Limit = cbLimit;
4163 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
4164 pSReg->ValidSel = uSel;
4165 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
4166 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4167 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
4168
4169 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
4170 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4171 return VINF_SUCCESS;
4172}
4173
4174
4175/**
4176 * Performs a task switch.
4177 *
4178 * If the task switch is the result of a JMP, CALL or IRET instruction, the
4179 * caller is responsible for performing the necessary checks (like DPL, TSS
4180 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
4181 * reference for JMP, CALL, IRET.
4182 *
4183 * If the task switch is due to a software interrupt or hardware exception,
4184 * the caller is responsible for validating the TSS selector and descriptor. See
4185 * Intel Instruction reference for INT n.
4186 *
4187 * @returns VBox strict status code.
4188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4189 * @param enmTaskSwitch The cause of the task switch.
4190 * @param uNextEip The EIP effective after the task switch.
4191 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4192 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4193 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4194 * @param SelTSS The TSS selector of the new task.
4195 * @param pNewDescTSS Pointer to the new TSS descriptor.
4196 */
4197IEM_STATIC VBOXSTRICTRC
4198iemTaskSwitch(PVMCPUCC pVCpu,
4199 IEMTASKSWITCH enmTaskSwitch,
4200 uint32_t uNextEip,
4201 uint32_t fFlags,
4202 uint16_t uErr,
4203 uint64_t uCr2,
4204 RTSEL SelTSS,
4205 PIEMSELDESC pNewDescTSS)
4206{
4207 Assert(!IEM_IS_REAL_MODE(pVCpu));
4208 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4209 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4210
4211 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4212 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4213 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4214 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4215 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4216
4217 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4218 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4219
4220 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4221 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4222
4223 /* Update CR2 in case it's a page-fault. */
4224 /** @todo This should probably be done much earlier in IEM/PGM. See
4225 * @bugref{5653#c49}. */
4226 if (fFlags & IEM_XCPT_FLAGS_CR2)
4227 pVCpu->cpum.GstCtx.cr2 = uCr2;
4228
4229 /*
4230 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4231 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4232 */
4233 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4234 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4235 if (uNewTSSLimit < uNewTSSLimitMin)
4236 {
4237 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4238 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4239 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4240 }
4241
4242 /*
4243 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4244 * The new TSS must have been read and validated (DPL, limits etc.) before a
4245 * task-switch VM-exit commences.
4246 *
4247 * See Intel spec. 25.4.2 "Treatment of Task Switches".
4248 */
4249 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4250 {
4251 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4252 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4253 }
4254
4255 /*
4256 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4257 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4258 */
4259 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4260 {
4261 uint32_t const uExitInfo1 = SelTSS;
4262 uint32_t uExitInfo2 = uErr;
4263 switch (enmTaskSwitch)
4264 {
4265 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4266 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4267 default: break;
4268 }
4269 if (fFlags & IEM_XCPT_FLAGS_ERR)
4270 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4271 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4272 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4273
4274 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4275 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4276 RT_NOREF2(uExitInfo1, uExitInfo2);
4277 }
4278
4279 /*
4280 * Check the current TSS limit. The last written byte to the current TSS during the
4281 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4282 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4283 *
4284 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4285 * end up with smaller than "legal" TSS limits.
4286 */
4287 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4288 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4289 if (uCurTSSLimit < uCurTSSLimitMin)
4290 {
4291 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4292 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4293 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4294 }
4295
4296 /*
4297 * Verify that the new TSS can be accessed and map it. Map only the required contents
4298 * and not the entire TSS.
4299 */
4300 void *pvNewTSS;
4301 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
4302 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4303 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4304 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4305 * not perform correct translation if this happens. See Intel spec. 7.2.1
4306 * "Task-State Segment". */
4307 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4308 if (rcStrict != VINF_SUCCESS)
4309 {
4310 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4311 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4312 return rcStrict;
4313 }
4314
4315 /*
4316 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4317 */
4318 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4319 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4320 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4321 {
4322 PX86DESC pDescCurTSS;
4323 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4324 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4325 if (rcStrict != VINF_SUCCESS)
4326 {
4327 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4328 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4329 return rcStrict;
4330 }
4331
4332 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4333 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4334 if (rcStrict != VINF_SUCCESS)
4335 {
4336 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4337 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4338 return rcStrict;
4339 }
4340
4341 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4342 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4343 {
4344 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4345 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4346 u32EFlags &= ~X86_EFL_NT;
4347 }
4348 }
4349
4350 /*
4351 * Save the CPU state into the current TSS.
4352 */
4353 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4354 if (GCPtrNewTSS == GCPtrCurTSS)
4355 {
4356 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4357 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4358 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4359 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4360 pVCpu->cpum.GstCtx.ldtr.Sel));
4361 }
4362 if (fIsNewTSS386)
4363 {
4364 /*
4365 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4366 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4367 */
4368 void *pvCurTSS32;
4369 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4370 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4371 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4372 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4373 if (rcStrict != VINF_SUCCESS)
4374 {
4375 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4376 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4377 return rcStrict;
4378 }
4379
4380 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTSS..cbCurTSS). */
4381 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4382 pCurTSS32->eip = uNextEip;
4383 pCurTSS32->eflags = u32EFlags;
4384 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4385 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4386 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4387 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4388 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4389 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4390 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4391 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4392 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4393 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4394 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4395 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4396 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4397 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4398
4399 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4400 if (rcStrict != VINF_SUCCESS)
4401 {
4402 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4403 VBOXSTRICTRC_VAL(rcStrict)));
4404 return rcStrict;
4405 }
4406 }
4407 else
4408 {
4409 /*
4410 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4411 */
4412 void *pvCurTSS16;
4413 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4414 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4415 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4416 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4417 if (rcStrict != VINF_SUCCESS)
4418 {
4419 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4420 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4421 return rcStrict;
4422 }
4423
4424 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTSS..cbCurTSS). */
4425 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4426 pCurTSS16->ip = uNextEip;
4427 pCurTSS16->flags = u32EFlags;
4428 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4429 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4430 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4431 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4432 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4433 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4434 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4435 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4436 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4437 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4438 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4439 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4440
4441 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4442 if (rcStrict != VINF_SUCCESS)
4443 {
4444 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4445 VBOXSTRICTRC_VAL(rcStrict)));
4446 return rcStrict;
4447 }
4448 }
4449
4450 /*
4451 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4452 */
4453 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4454 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4455 {
4456 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4457 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4458 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4459 }
4460
4461 /*
4462 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4463 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4464 */
4465 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4466 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4467 bool fNewDebugTrap;
4468 if (fIsNewTSS386)
4469 {
4470 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
4471 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4472 uNewEip = pNewTSS32->eip;
4473 uNewEflags = pNewTSS32->eflags;
4474 uNewEax = pNewTSS32->eax;
4475 uNewEcx = pNewTSS32->ecx;
4476 uNewEdx = pNewTSS32->edx;
4477 uNewEbx = pNewTSS32->ebx;
4478 uNewEsp = pNewTSS32->esp;
4479 uNewEbp = pNewTSS32->ebp;
4480 uNewEsi = pNewTSS32->esi;
4481 uNewEdi = pNewTSS32->edi;
4482 uNewES = pNewTSS32->es;
4483 uNewCS = pNewTSS32->cs;
4484 uNewSS = pNewTSS32->ss;
4485 uNewDS = pNewTSS32->ds;
4486 uNewFS = pNewTSS32->fs;
4487 uNewGS = pNewTSS32->gs;
4488 uNewLdt = pNewTSS32->selLdt;
4489 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4490 }
4491 else
4492 {
4493 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
4494 uNewCr3 = 0;
4495 uNewEip = pNewTSS16->ip;
4496 uNewEflags = pNewTSS16->flags;
4497 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4498 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4499 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4500 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4501 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4502 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4503 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4504 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4505 uNewES = pNewTSS16->es;
4506 uNewCS = pNewTSS16->cs;
4507 uNewSS = pNewTSS16->ss;
4508 uNewDS = pNewTSS16->ds;
4509 uNewFS = 0;
4510 uNewGS = 0;
4511 uNewLdt = pNewTSS16->selLdt;
4512 fNewDebugTrap = false;
4513 }
4514
4515 if (GCPtrNewTSS == GCPtrCurTSS)
4516 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4517 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4518
4519 /*
4520 * We're done accessing the new TSS.
4521 */
4522 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4523 if (rcStrict != VINF_SUCCESS)
4524 {
4525 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4526 return rcStrict;
4527 }
4528
4529 /*
4530 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4531 */
4532 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4533 {
4534 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4535 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4536 if (rcStrict != VINF_SUCCESS)
4537 {
4538 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4539 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4540 return rcStrict;
4541 }
4542
4543 /* Check that the descriptor indicates the new TSS is available (not busy). */
4544 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4545 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4546 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4547
4548 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4549 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4550 if (rcStrict != VINF_SUCCESS)
4551 {
4552 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4553 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4554 return rcStrict;
4555 }
4556 }
4557
4558 /*
4559 * From this point on, we're technically in the new task. Exceptions are deferred until
4560 * the task switch completes and are then raised before the first instruction of the new task executes.
4561 */
4562 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4563 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4564 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4565 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4566 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4567 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4568 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4569
4570 /* Set the busy bit in TR. */
4571 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4572
4573 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4574 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4575 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4576 {
4577 uNewEflags |= X86_EFL_NT;
4578 }
4579
4580 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4581 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4582 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4583
4584 pVCpu->cpum.GstCtx.eip = uNewEip;
4585 pVCpu->cpum.GstCtx.eax = uNewEax;
4586 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4587 pVCpu->cpum.GstCtx.edx = uNewEdx;
4588 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4589 pVCpu->cpum.GstCtx.esp = uNewEsp;
4590 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4591 pVCpu->cpum.GstCtx.esi = uNewEsi;
4592 pVCpu->cpum.GstCtx.edi = uNewEdi;
4593
4594 uNewEflags &= X86_EFL_LIVE_MASK;
4595 uNewEflags |= X86_EFL_RA1_MASK;
4596 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4597
4598 /*
4599 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4600 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4601      * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
4602 */
4603 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4604 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4605
4606 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4607 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4608
4609 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4610 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4611
4612 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4613 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4614
4615 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4616 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4617
4618 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4619 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4620 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4621
4622 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4623 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4624 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4625 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4626
4627 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4628 {
4629 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4630 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4631 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4632 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4633 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4634 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4635 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4636 }
4637
4638 /*
4639 * Switch CR3 for the new task.
4640 */
4641 if ( fIsNewTSS386
4642 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4643 {
4644 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4645 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4646 AssertRCSuccessReturn(rc, rc);
4647
4648 /* Inform PGM. */
4649 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
4650 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4651 AssertRCReturn(rc, rc);
4652 /* ignore informational status codes */
4653
4654 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4655 }
4656
4657 /*
4658 * Switch LDTR for the new task.
4659 */
4660 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4661 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4662 else
4663 {
4664 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4665
4666 IEMSELDESC DescNewLdt;
4667 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4668 if (rcStrict != VINF_SUCCESS)
4669 {
4670 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4671 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4672 return rcStrict;
4673 }
4674 if ( !DescNewLdt.Legacy.Gen.u1Present
4675 || DescNewLdt.Legacy.Gen.u1DescType
4676 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4677 {
4678 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4679 uNewLdt, DescNewLdt.Legacy.u));
4680 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4681 }
4682
4683 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4684 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4685 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4686 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4687 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4688 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4689 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4690 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4691 }
4692
4693 IEMSELDESC DescSS;
4694 if (IEM_IS_V86_MODE(pVCpu))
4695 {
4696 pVCpu->iem.s.uCpl = 3;
4697 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4698 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4699 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4700 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4701 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4702 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4703
4704 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
4705 DescSS.Legacy.u = 0;
4706 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4707 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4708 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4709 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4710 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4711 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4712 DescSS.Legacy.Gen.u2Dpl = 3;
4713 }
4714 else
4715 {
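             /* During a task switch the CPL of the new task is given by the RPL of the CS
                selector loaded from the new TSS (it is checked against CS.DPL further down). */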
4716 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
4717
4718 /*
4719 * Load the stack segment for the new task.
4720 */
4721 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4722 {
4723 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4724 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4725 }
4726
4727 /* Fetch the descriptor. */
4728 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4729 if (rcStrict != VINF_SUCCESS)
4730 {
4731 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4732 VBOXSTRICTRC_VAL(rcStrict)));
4733 return rcStrict;
4734 }
4735
4736 /* SS must be a data segment and writable. */
4737 if ( !DescSS.Legacy.Gen.u1DescType
4738 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4739 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4740 {
4741 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4742 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4743 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4744 }
4745
4746 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4747 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4748 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4749 {
4750 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4751 uNewCpl));
4752 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4753 }
4754
4755 /* Is it there? */
4756 if (!DescSS.Legacy.Gen.u1Present)
4757 {
4758 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4759 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4760 }
4761
4762 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4763 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4764
4765 /* Set the accessed bit before committing the result into SS. */
4766 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4767 {
4768 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4769 if (rcStrict != VINF_SUCCESS)
4770 return rcStrict;
4771 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4772 }
4773
4774 /* Commit SS. */
4775 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4776 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4777 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4778 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4779 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4780 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4781 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4782
4783 /* CPL has changed, update IEM before loading rest of segments. */
4784 pVCpu->iem.s.uCpl = uNewCpl;
4785
4786 /*
4787 * Load the data segments for the new task.
4788 */
4789 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4790 if (rcStrict != VINF_SUCCESS)
4791 return rcStrict;
4792 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4793 if (rcStrict != VINF_SUCCESS)
4794 return rcStrict;
4795 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4796 if (rcStrict != VINF_SUCCESS)
4797 return rcStrict;
4798 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4799 if (rcStrict != VINF_SUCCESS)
4800 return rcStrict;
4801
4802 /*
4803 * Load the code segment for the new task.
4804 */
4805 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4806 {
4807 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4808 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4809 }
4810
4811 /* Fetch the descriptor. */
4812 IEMSELDESC DescCS;
4813 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4814 if (rcStrict != VINF_SUCCESS)
4815 {
4816 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4817 return rcStrict;
4818 }
4819
4820 /* CS must be a code segment. */
4821 if ( !DescCS.Legacy.Gen.u1DescType
4822 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4823 {
4824 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4825 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4826 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4827 }
4828
4829 /* For conforming CS, DPL must be less than or equal to the RPL. */
4830 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4831 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4832 {
4833             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4834 DescCS.Legacy.Gen.u2Dpl));
4835 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4836 }
4837
4838 /* For non-conforming CS, DPL must match RPL. */
4839 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4840 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4841 {
4842             Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4843 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4844 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4845 }
4846
4847 /* Is it there? */
4848 if (!DescCS.Legacy.Gen.u1Present)
4849 {
4850 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4851 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4852 }
4853
4854 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4855 u64Base = X86DESC_BASE(&DescCS.Legacy);
4856
4857 /* Set the accessed bit before committing the result into CS. */
4858 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4859 {
4860 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4861 if (rcStrict != VINF_SUCCESS)
4862 return rcStrict;
4863 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4864 }
4865
4866 /* Commit CS. */
4867 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4868 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4869 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4870 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4871 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4872 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4873 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4874 }
4875
4876 /** @todo Debug trap. */
4877 if (fIsNewTSS386 && fNewDebugTrap)
4878 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4879
4880 /*
4881 * Construct the error code masks based on what caused this task switch.
4882 * See Intel Instruction reference for INT.
4883 */
4884 uint16_t uExt;
4885 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4886 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4887 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4888 {
4889 uExt = 1;
4890 }
4891 else
4892 uExt = 0;
4893
4894 /*
4895 * Push any error code on to the new stack.
4896 */
4897 if (fFlags & IEM_XCPT_FLAGS_ERR)
4898 {
4899 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4900 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
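             /* The error code is pushed as a dword on the stack of a 32-bit TSS and as a word for a 16-bit TSS. */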
4901 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4902
4903 /* Check that there is sufficient space on the stack. */
4904 /** @todo Factor out segment limit checking for normal/expand down segments
4905 * into a separate function. */
4906 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4907 {
4908 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4909 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4910 {
4911 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4912 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4913 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4914 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4915 }
4916 }
4917 else
4918 {
4919 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4920 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4921 {
4922 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4923 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4924 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4925 }
4926 }
4927
4928
4929 if (fIsNewTSS386)
4930 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4931 else
4932 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4933 if (rcStrict != VINF_SUCCESS)
4934 {
4935 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4936 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4937 return rcStrict;
4938 }
4939 }
4940
4941 /* Check the new EIP against the new CS limit. */
4942 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4943 {
4944         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4945 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4946 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4947 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4948 }
4949
4950 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4951 pVCpu->cpum.GstCtx.ss.Sel));
4952 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4953}
4954
4955
4956/**
4957 * Implements exceptions and interrupts for protected mode.
4958 *
4959 * @returns VBox strict status code.
4960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4961 * @param cbInstr The number of bytes to offset rIP by in the return
4962 * address.
4963 * @param u8Vector The interrupt / exception vector number.
4964 * @param fFlags The flags.
4965 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4966 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4967 */
4968IEM_STATIC VBOXSTRICTRC
4969iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
4970 uint8_t cbInstr,
4971 uint8_t u8Vector,
4972 uint32_t fFlags,
4973 uint16_t uErr,
4974 uint64_t uCr2)
4975{
4976 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4977
4978 /*
4979 * Read the IDT entry.
4980 */
4981 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4982 {
4983 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4984 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4985 }
4986 X86DESC Idte;
4987 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4988 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4989 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4990 {
4991 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4992 return rcStrict;
4993 }
4994 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4995 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4996 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4997
4998 /*
4999 * Check the descriptor type, DPL and such.
5000 * ASSUMES this is done in the same order as described for call-gate calls.
5001 */
5002 if (Idte.Gate.u1DescType)
5003 {
5004 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5005 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5006 }
5007 bool fTaskGate = false;
5008 uint8_t f32BitGate = true;
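         /* Note: f32BitGate doubles as a shift count (0 or 1) when sizing the stack frame pushes below. */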
5009 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5010 switch (Idte.Gate.u4Type)
5011 {
5012 case X86_SEL_TYPE_SYS_UNDEFINED:
5013 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
5014 case X86_SEL_TYPE_SYS_LDT:
5015 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
5016 case X86_SEL_TYPE_SYS_286_CALL_GATE:
5017 case X86_SEL_TYPE_SYS_UNDEFINED2:
5018 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
5019 case X86_SEL_TYPE_SYS_UNDEFINED3:
5020 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
5021 case X86_SEL_TYPE_SYS_386_CALL_GATE:
5022 case X86_SEL_TYPE_SYS_UNDEFINED4:
5023 {
5024 /** @todo check what actually happens when the type is wrong...
5025 * esp. call gates. */
5026 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5027 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5028 }
5029
5030 case X86_SEL_TYPE_SYS_286_INT_GATE:
5031 f32BitGate = false;
5032 RT_FALL_THRU();
5033 case X86_SEL_TYPE_SYS_386_INT_GATE:
5034 fEflToClear |= X86_EFL_IF;
5035 break;
5036
5037 case X86_SEL_TYPE_SYS_TASK_GATE:
5038 fTaskGate = true;
5039#ifndef IEM_IMPLEMENTS_TASKSWITCH
5040 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
5041#endif
5042 break;
5043
5044 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
5045             f32BitGate = false;
                 RT_FALL_THRU();
5046 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
5047 break;
5048
5049 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5050 }
5051
5052 /* Check DPL against CPL if applicable. */
5053 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5054 {
5055 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5056 {
5057 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5058 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5059 }
5060 }
5061
5062 /* Is it there? */
5063 if (!Idte.Gate.u1Present)
5064 {
5065 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
5066 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5067 }
5068
5069 /* Is it a task-gate? */
5070 if (fTaskGate)
5071 {
5072 /*
5073 * Construct the error code masks based on what caused this task switch.
5074 * See Intel Instruction reference for INT.
5075 */
5076 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5077 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
5078 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
5079 RTSEL SelTSS = Idte.Gate.u16Sel;
5080
5081 /*
5082 * Fetch the TSS descriptor in the GDT.
5083 */
5084 IEMSELDESC DescTSS;
5085 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
5086 if (rcStrict != VINF_SUCCESS)
5087 {
5088 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
5089 VBOXSTRICTRC_VAL(rcStrict)));
5090 return rcStrict;
5091 }
5092
5093 /* The TSS descriptor must be a system segment and be available (not busy). */
5094 if ( DescTSS.Legacy.Gen.u1DescType
5095 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5096 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
5097 {
5098 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
5099 u8Vector, SelTSS, DescTSS.Legacy.au64));
5100 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
5101 }
5102
5103 /* The TSS must be present. */
5104 if (!DescTSS.Legacy.Gen.u1Present)
5105 {
5106 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
5107 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
5108 }
5109
5110 /* Do the actual task switch. */
5111 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
5112 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
5113 fFlags, uErr, uCr2, SelTSS, &DescTSS);
5114 }
5115
5116 /* A null CS is bad. */
5117 RTSEL NewCS = Idte.Gate.u16Sel;
5118 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5119 {
5120 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5121 return iemRaiseGeneralProtectionFault0(pVCpu);
5122 }
5123
5124 /* Fetch the descriptor for the new CS. */
5125 IEMSELDESC DescCS;
5126 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
5127 if (rcStrict != VINF_SUCCESS)
5128 {
5129 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5130 return rcStrict;
5131 }
5132
5133 /* Must be a code segment. */
5134 if (!DescCS.Legacy.Gen.u1DescType)
5135 {
5136 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5137 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5138 }
5139 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
5140 {
5141 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5142 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5143 }
5144
5145 /* Don't allow lowering the privilege level. */
5146 /** @todo Does the lowering of privileges apply to software interrupts
5147      * only? This has a bearing on the more-privileged or
5148 * same-privilege stack behavior further down. A testcase would
5149 * be nice. */
5150 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5151 {
5152 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5153 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5154 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5155 }
5156
5157 /* Make sure the selector is present. */
5158 if (!DescCS.Legacy.Gen.u1Present)
5159 {
5160 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5161 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5162 }
5163
5164 /* Check the new EIP against the new CS limit. */
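         /* A 286 interrupt/trap gate only supplies a 16-bit offset, while 386 gates combine
            the low and high offset words into a 32-bit EIP. */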
5165 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
5166 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
5167 ? Idte.Gate.u16OffsetLow
5168 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
5169 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
5170 if (uNewEip > cbLimitCS)
5171 {
5172 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
5173 u8Vector, uNewEip, cbLimitCS, NewCS));
5174 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5175 }
5176 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
5177
5178 /* Calc the flag image to push. */
5179 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5180 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5181 fEfl &= ~X86_EFL_RF;
5182 else
5183 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5184
5185 /* From V8086 mode only go to CPL 0. */
5186 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5187 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
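         /* (Interrupts through a conforming code segment keep the current CPL; non-conforming
            ones run at the gate CS descriptor's DPL.) */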
5188 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5189 {
5190 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5191 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5192 }
5193
5194 /*
5195 * If the privilege level changes, we need to get a new stack from the TSS.
5196 * This in turns means validating the new SS and ESP...
5197 */
5198 if (uNewCpl != pVCpu->iem.s.uCpl)
5199 {
5200 RTSEL NewSS;
5201 uint32_t uNewEsp;
5202 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5203 if (rcStrict != VINF_SUCCESS)
5204 return rcStrict;
5205
5206 IEMSELDESC DescSS;
5207 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5208 if (rcStrict != VINF_SUCCESS)
5209 return rcStrict;
5210 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5211 if (!DescSS.Legacy.Gen.u1DefBig)
5212 {
5213 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5214 uNewEsp = (uint16_t)uNewEsp;
5215 }
5216
5217 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5218
5219 /* Check that there is sufficient space for the stack frame. */
5220 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5221 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5222 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5223 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
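             /* I.e. (E)IP, CS, (E)FLAGS, (E)SP and SS, plus the error code when present; in
                V8086 mode ES, DS, FS and GS are pushed as well. The size doubles for 32-bit gates. */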
5224
5225 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5226 {
5227 if ( uNewEsp - 1 > cbLimitSS
5228 || uNewEsp < cbStackFrame)
5229 {
5230 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5231 u8Vector, NewSS, uNewEsp, cbStackFrame));
5232 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5233 }
5234 }
5235 else
5236 {
5237 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5238 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5239 {
5240 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5241 u8Vector, NewSS, uNewEsp, cbStackFrame));
5242 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5243 }
5244 }
5245
5246 /*
5247 * Start making changes.
5248 */
5249
5250 /* Set the new CPL so that stack accesses use it. */
5251 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5252 pVCpu->iem.s.uCpl = uNewCpl;
5253
5254 /* Create the stack frame. */
5255 RTPTRUNION uStackFrame;
5256 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5257 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5258 if (rcStrict != VINF_SUCCESS)
5259 return rcStrict;
5260 void * const pvStackFrame = uStackFrame.pv;
5261 if (f32BitGate)
5262 {
5263 if (fFlags & IEM_XCPT_FLAGS_ERR)
5264 *uStackFrame.pu32++ = uErr;
5265 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5266 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5267 uStackFrame.pu32[2] = fEfl;
5268 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5269 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5270 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5271 if (fEfl & X86_EFL_VM)
5272 {
5273 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5274 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5275 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5276 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5277 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5278 }
5279 }
5280 else
5281 {
5282 if (fFlags & IEM_XCPT_FLAGS_ERR)
5283 *uStackFrame.pu16++ = uErr;
5284 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5285 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5286 uStackFrame.pu16[2] = fEfl;
5287 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5288 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5289 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5290 if (fEfl & X86_EFL_VM)
5291 {
5292 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5293 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5294 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5295 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5296 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5297 }
5298 }
5299 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5300 if (rcStrict != VINF_SUCCESS)
5301 return rcStrict;
5302
5303 /* Mark the selectors 'accessed' (hope this is the correct time). */
5304         /** @todo testcase: exactly _when_ are the accessed bits set - before or
5305 * after pushing the stack frame? (Write protect the gdt + stack to
5306 * find out.) */
5307 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5308 {
5309 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5310 if (rcStrict != VINF_SUCCESS)
5311 return rcStrict;
5312 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5313 }
5314
5315 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5316 {
5317 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5318 if (rcStrict != VINF_SUCCESS)
5319 return rcStrict;
5320 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5321 }
5322
5323 /*
5324          * Start committing the register changes (joins with the DPL=CPL branch).
5325 */
5326 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5327 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5328 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5329 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5330 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5331 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5332 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5333 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5334 * SP is loaded).
5335 * Need to check the other combinations too:
5336 * - 16-bit TSS, 32-bit handler
5337 * - 32-bit TSS, 16-bit handler */
5338 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5339 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5340 else
5341 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5342
5343 if (fEfl & X86_EFL_VM)
5344 {
5345 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5346 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5347 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5348 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5349 }
5350 }
5351 /*
5352 * Same privilege, no stack change and smaller stack frame.
5353 */
5354 else
5355 {
5356 uint64_t uNewRsp;
5357 RTPTRUNION uStackFrame;
5358 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
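             /* I.e. (E)IP, CS and (E)FLAGS, plus the error code when present; words for
                16-bit gates, dwords for 32-bit ones. */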
5359 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5360 if (rcStrict != VINF_SUCCESS)
5361 return rcStrict;
5362 void * const pvStackFrame = uStackFrame.pv;
5363
5364 if (f32BitGate)
5365 {
5366 if (fFlags & IEM_XCPT_FLAGS_ERR)
5367 *uStackFrame.pu32++ = uErr;
5368 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5369 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5370 uStackFrame.pu32[2] = fEfl;
5371 }
5372 else
5373 {
5374 if (fFlags & IEM_XCPT_FLAGS_ERR)
5375 *uStackFrame.pu16++ = uErr;
5376 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5377 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5378 uStackFrame.pu16[2] = fEfl;
5379 }
5380 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5381 if (rcStrict != VINF_SUCCESS)
5382 return rcStrict;
5383
5384 /* Mark the CS selector as 'accessed'. */
5385 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5386 {
5387 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5388 if (rcStrict != VINF_SUCCESS)
5389 return rcStrict;
5390 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5391 }
5392
5393 /*
5394 * Start committing the register changes (joins with the other branch).
5395 */
5396 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5397 }
5398
5399 /* ... register committing continues. */
5400 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5401 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5402 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5403 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5404 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5405 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5406
5407 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5408 fEfl &= ~fEflToClear;
5409 IEMMISC_SET_EFL(pVCpu, fEfl);
5410
5411 if (fFlags & IEM_XCPT_FLAGS_CR2)
5412 pVCpu->cpum.GstCtx.cr2 = uCr2;
5413
5414 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5415 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5416
5417 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5418}
5419
5420
5421/**
5422 * Implements exceptions and interrupts for long mode.
5423 *
5424 * @returns VBox strict status code.
5425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5426 * @param cbInstr The number of bytes to offset rIP by in the return
5427 * address.
5428 * @param u8Vector The interrupt / exception vector number.
5429 * @param fFlags The flags.
5430 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5431 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5432 */
5433IEM_STATIC VBOXSTRICTRC
5434iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5435 uint8_t cbInstr,
5436 uint8_t u8Vector,
5437 uint32_t fFlags,
5438 uint16_t uErr,
5439 uint64_t uCr2)
5440{
5441 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5442
5443 /*
5444 * Read the IDT entry.
5445 */
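         /* Long mode IDT entries are 16 bytes each, hence the shift by 4 below. */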
5446 uint16_t offIdt = (uint16_t)u8Vector << 4;
5447 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5448 {
5449 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5450 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5451 }
5452 X86DESC64 Idte;
5453#ifdef _MSC_VER /* Shut up silly compiler warning. */
5454 Idte.au64[0] = 0;
5455 Idte.au64[1] = 0;
5456#endif
5457 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5458 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5459 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5460 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5461 {
5462 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5463 return rcStrict;
5464 }
5465 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5466 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5467 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5468
5469 /*
5470 * Check the descriptor type, DPL and such.
5471 * ASSUMES this is done in the same order as described for call-gate calls.
5472 */
5473 if (Idte.Gate.u1DescType)
5474 {
5475 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5476 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5477 }
5478 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5479 switch (Idte.Gate.u4Type)
5480 {
5481 case AMD64_SEL_TYPE_SYS_INT_GATE:
5482 fEflToClear |= X86_EFL_IF;
5483 break;
5484 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5485 break;
5486
5487 default:
5488 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5489 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5490 }
5491
5492 /* Check DPL against CPL if applicable. */
5493 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5494 {
5495 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5496 {
5497 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5498 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5499 }
5500 }
5501
5502 /* Is it there? */
5503 if (!Idte.Gate.u1Present)
5504 {
5505 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5506 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5507 }
5508
5509 /* A null CS is bad. */
5510 RTSEL NewCS = Idte.Gate.u16Sel;
5511 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5512 {
5513 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5514 return iemRaiseGeneralProtectionFault0(pVCpu);
5515 }
5516
5517 /* Fetch the descriptor for the new CS. */
5518 IEMSELDESC DescCS;
5519 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5520 if (rcStrict != VINF_SUCCESS)
5521 {
5522 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5523 return rcStrict;
5524 }
5525
5526 /* Must be a 64-bit code segment. */
5527 if (!DescCS.Long.Gen.u1DescType)
5528 {
5529 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5530 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5531 }
5532 if ( !DescCS.Long.Gen.u1Long
5533 || DescCS.Long.Gen.u1DefBig
5534 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5535 {
5536 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5537 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5538 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5539 }
5540
5541 /* Don't allow lowering the privilege level. For non-conforming CS
5542 selectors, the CS.DPL sets the privilege level the trap/interrupt
5543 handler runs at. For conforming CS selectors, the CPL remains
5544 unchanged, but the CS.DPL must be <= CPL. */
5545 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5546 * when CPU in Ring-0. Result \#GP? */
5547 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5548 {
5549 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5550 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5551 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5552 }
5553
5554
5555 /* Make sure the selector is present. */
5556 if (!DescCS.Legacy.Gen.u1Present)
5557 {
5558 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5559 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5560 }
5561
5562 /* Check that the new RIP is canonical. */
5563 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5564 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5565 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5566 if (!IEM_IS_CANONICAL(uNewRip))
5567 {
5568 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5569 return iemRaiseGeneralProtectionFault0(pVCpu);
5570 }
5571
5572 /*
5573 * If the privilege level changes or if the IST isn't zero, we need to get
5574 * a new stack from the TSS.
5575 */
5576 uint64_t uNewRsp;
5577 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5578 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5579 if ( uNewCpl != pVCpu->iem.s.uCpl
5580 || Idte.Gate.u3IST != 0)
5581 {
5582 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5583 if (rcStrict != VINF_SUCCESS)
5584 return rcStrict;
5585 }
5586 else
5587 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5588 uNewRsp &= ~(uint64_t)0xf;
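         /* In 64-bit mode the stack pointer is aligned down to a 16-byte boundary before the frame is pushed. */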
5589
5590 /*
5591 * Calc the flag image to push.
5592 */
5593 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5594 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5595 fEfl &= ~X86_EFL_RF;
5596 else
5597 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5598
5599 /*
5600 * Start making changes.
5601 */
5602 /* Set the new CPL so that stack accesses use it. */
5603 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5604 pVCpu->iem.s.uCpl = uNewCpl;
5605
5606 /* Create the stack frame. */
5607 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
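         /* I.e. RIP, CS, RFLAGS, RSP and SS, plus the error code when present, each pushed as a 64-bit value. */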
5608 RTPTRUNION uStackFrame;
5609 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5610 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5611 if (rcStrict != VINF_SUCCESS)
5612 return rcStrict;
5613 void * const pvStackFrame = uStackFrame.pv;
5614
5615 if (fFlags & IEM_XCPT_FLAGS_ERR)
5616 *uStackFrame.pu64++ = uErr;
5617 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5618 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5619 uStackFrame.pu64[2] = fEfl;
5620 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5621 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5622 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5623 if (rcStrict != VINF_SUCCESS)
5624 return rcStrict;
5625
5626     /* Mark the CS selector 'accessed' (hope this is the correct time). */
5627     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5628 * after pushing the stack frame? (Write protect the gdt + stack to
5629 * find out.) */
5630 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5631 {
5632 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5633 if (rcStrict != VINF_SUCCESS)
5634 return rcStrict;
5635 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5636 }
5637
5638 /*
5639      * Start committing the register changes.
5640 */
5641     /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5642 * hidden registers when interrupting 32-bit or 16-bit code! */
5643 if (uNewCpl != uOldCpl)
5644 {
5645 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5646 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5647 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5648 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5649 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5650 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5651 }
5652 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5653 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5654 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5655 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5656 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5657 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5658 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5659 pVCpu->cpum.GstCtx.rip = uNewRip;
5660
5661 fEfl &= ~fEflToClear;
5662 IEMMISC_SET_EFL(pVCpu, fEfl);
5663
5664 if (fFlags & IEM_XCPT_FLAGS_CR2)
5665 pVCpu->cpum.GstCtx.cr2 = uCr2;
5666
5667 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5668 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5669
5670 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5671}
5672
5673
5674/**
5675 * Implements exceptions and interrupts.
5676 *
5677  * All exceptions and interrupts go through this function!
5678 *
5679 * @returns VBox strict status code.
5680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5681 * @param cbInstr The number of bytes to offset rIP by in the return
5682 * address.
5683 * @param u8Vector The interrupt / exception vector number.
5684 * @param fFlags The flags.
5685 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5686 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5687 */
5688DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5689iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5690 uint8_t cbInstr,
5691 uint8_t u8Vector,
5692 uint32_t fFlags,
5693 uint16_t uErr,
5694 uint64_t uCr2)
5695{
5696 /*
5697 * Get all the state that we might need here.
5698 */
5699 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5700 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5701
5702#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5703 /*
5704 * Flush prefetch buffer
5705 */
5706 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5707#endif
5708
5709 /*
5710 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5711 */
5712 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5713 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5714 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5715 | IEM_XCPT_FLAGS_BP_INSTR
5716 | IEM_XCPT_FLAGS_ICEBP_INSTR
5717 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5718 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5719 {
5720 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5721 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5722 u8Vector = X86_XCPT_GP;
5723 uErr = 0;
5724 }
5725#ifdef DBGFTRACE_ENABLED
5726 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5727 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5728 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5729#endif
5730
5731 /*
5732 * Evaluate whether NMI blocking should be in effect.
5733 * Normally, NMI blocking is in effect whenever we inject an NMI.
5734 */
5735 bool fBlockNmi;
5736 if ( u8Vector == X86_XCPT_NMI
5737 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5738 fBlockNmi = true;
5739 else
5740 fBlockNmi = false;
5741
5742#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5743 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5744 {
5745 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5746 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5747 return rcStrict0;
5748
5749 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5750 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5751 {
5752 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5753 fBlockNmi = false;
5754 }
5755 }
5756#endif
5757
5758#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5759 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5760 {
5761 /*
5762 * If the event is being injected as part of VMRUN, it isn't subject to event
5763 * intercepts in the nested-guest. However, secondary exceptions that occur
5764 * during injection of any event -are- subject to exception intercepts.
5765 *
5766 * See AMD spec. 15.20 "Event Injection".
5767 */
5768 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5769 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5770 else
5771 {
5772 /*
5773 * Check and handle if the event being raised is intercepted.
5774 */
5775 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5776 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5777 return rcStrict0;
5778 }
5779 }
5780#endif
5781
5782 /*
5783 * Set NMI blocking if necessary.
5784 */
5785 if ( fBlockNmi
5786 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5787 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5788
5789 /*
5790 * Do recursion accounting.
5791 */
5792 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5793 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5794 if (pVCpu->iem.s.cXcptRecursions == 0)
5795 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5796 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5797 else
5798 {
5799 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5800 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5801 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5802
5803 if (pVCpu->iem.s.cXcptRecursions >= 4)
5804 {
5805#ifdef DEBUG_bird
5806 AssertFailed();
5807#endif
5808 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5809 }
5810
5811 /*
5812 * Evaluate the sequence of recurring events.
5813 */
5814 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5815 NULL /* pXcptRaiseInfo */);
5816 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5817 { /* likely */ }
5818 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5819 {
5820 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5821 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5822 u8Vector = X86_XCPT_DF;
5823 uErr = 0;
5824#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5825 /* VMX nested-guest #DF intercept needs to be checked here. */
5826 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5827 {
5828 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5829 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5830 return rcStrict0;
5831 }
5832#endif
5833 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5834 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5835 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5836 }
5837 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5838 {
5839 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5840 return iemInitiateCpuShutdown(pVCpu);
5841 }
5842 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5843 {
5844 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5845 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5846 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5847 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5848 return VERR_EM_GUEST_CPU_HANG;
5849 }
5850 else
5851 {
5852 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5853 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5854 return VERR_IEM_IPE_9;
5855 }
5856
5857 /*
5858          * The 'EXT' bit is set when an exception occurs during delivery of an external
5859          * event (such as an interrupt or an earlier exception)[1]. The privileged software
5860          * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5861          * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5862 *
5863 * [1] - Intel spec. 6.13 "Error Code"
5864 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5865 * [3] - Intel Instruction reference for INT n.
5866 */
5867 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5868 && (fFlags & IEM_XCPT_FLAGS_ERR)
5869 && u8Vector != X86_XCPT_PF
5870 && u8Vector != X86_XCPT_DF)
5871 {
5872 uErr |= X86_TRAP_ERR_EXTERNAL;
5873 }
5874 }
5875
5876 pVCpu->iem.s.cXcptRecursions++;
5877 pVCpu->iem.s.uCurXcpt = u8Vector;
5878 pVCpu->iem.s.fCurXcpt = fFlags;
5879 pVCpu->iem.s.uCurXcptErr = uErr;
5880 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5881
5882 /*
5883 * Extensive logging.
5884 */
5885#if defined(LOG_ENABLED) && defined(IN_RING3)
5886 if (LogIs3Enabled())
5887 {
5888 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5889 PVM pVM = pVCpu->CTX_SUFF(pVM);
5890 char szRegs[4096];
5891 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5892 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5893 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5894 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5895 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5896 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5897 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5898 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5899 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5900 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5901 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5902 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5903 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5904 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5905 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5906 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5907 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5908 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5909 " efer=%016VR{efer}\n"
5910 " pat=%016VR{pat}\n"
5911 " sf_mask=%016VR{sf_mask}\n"
5912 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5913 " lstar=%016VR{lstar}\n"
5914 " star=%016VR{star} cstar=%016VR{cstar}\n"
5915 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5916 );
5917
5918 char szInstr[256];
5919 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5920 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5921 szInstr, sizeof(szInstr), NULL);
5922 Log3(("%s%s\n", szRegs, szInstr));
5923 }
5924#endif /* LOG_ENABLED */
5925
5926 /*
5927 * Call the mode specific worker function.
5928 */
5929 VBOXSTRICTRC rcStrict;
5930 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5931 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5932 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5933 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5934 else
5935 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5936
5937 /* Flush the prefetch buffer. */
5938#ifdef IEM_WITH_CODE_TLB
5939 pVCpu->iem.s.pbInstrBuf = NULL;
5940#else
5941 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5942#endif
5943
5944 /*
5945 * Unwind.
5946 */
5947 pVCpu->iem.s.cXcptRecursions--;
5948 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5949 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5950 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5951 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5952 pVCpu->iem.s.cXcptRecursions + 1));
5953 return rcStrict;
5954}
5955
5956#ifdef IEM_WITH_SETJMP
5957/**
5958 * See iemRaiseXcptOrInt. Will not return.
5959 */
5960IEM_STATIC DECL_NO_RETURN(void)
5961iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
5962 uint8_t cbInstr,
5963 uint8_t u8Vector,
5964 uint32_t fFlags,
5965 uint16_t uErr,
5966 uint64_t uCr2)
5967{
5968 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5969 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5970}
5971#endif
5972
5973
5974/** \#DE - 00. */
5975DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
5976{
5977 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5978}
5979
5980
5981/** \#DB - 01.
5982  * @note This automatically clears DR7.GD. */
5983DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
5984{
5985 /** @todo set/clear RF. */
5986 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5987 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5988}
5989
5990
5991/** \#BR - 05. */
5992DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
5993{
5994 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5995}
5996
5997
5998/** \#UD - 06. */
5999DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
6000{
6001 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6002}
6003
6004
6005/** \#NM - 07. */
6006DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
6007{
6008 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6009}
6010
6011
6012/** \#TS(err) - 0a. */
6013DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
6014{
6015 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
6016}
6017
6018
6019/** \#TS(tr) - 0a. */
6020DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
6021{
6022 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6023 pVCpu->cpum.GstCtx.tr.Sel, 0);
6024}
6025
6026
6027/** \#TS(0) - 0a. */
6028DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
6029{
6030 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6031 0, 0);
6032}
6033
6034
6035/** \#TS(sel) - 0a. */
6036DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
6037{
6038 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6039 uSel & X86_SEL_MASK_OFF_RPL, 0);
6040}
6041
6042
6043/** \#NP(err) - 0b. */
6044DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
6045{
6046 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
6047}
6048
6049
6050/** \#NP(sel) - 0b. */
6051DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
6052{
6053 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6054 uSel & ~X86_SEL_RPL, 0);
6055}
6056
6057
6058/** \#SS(seg) - 0c. */
6059DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
6060{
6061 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6062 uSel & ~X86_SEL_RPL, 0);
6063}
6064
6065
6066/** \#SS(err) - 0c. */
6067DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
6068{
6069 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
6070}
6071
6072
6073/** \#GP(n) - 0d. */
6074DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
6075{
6076 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
6077}
6078
6079
6080/** \#GP(0) - 0d. */
6081DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
6082{
6083 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6084}
6085
6086#ifdef IEM_WITH_SETJMP
6087/** \#GP(0) - 0d. */
6088DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
6089{
6090 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6091}
6092#endif
6093
6094
6095/** \#GP(sel) - 0d. */
6096DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
6097{
6098 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6099 Sel & ~X86_SEL_RPL, 0);
6100}
6101
6102
6103/** \#GP(0) - 0d. */
6104DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
6105{
6106 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6107}
6108
6109
6110/** \#GP(sel) - 0d. */
6111DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
6112{
6113 NOREF(iSegReg); NOREF(fAccess);
6114 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
6115 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6116}
6117
6118#ifdef IEM_WITH_SETJMP
6119/** \#GP(sel) - 0d, longjmp. */
6120DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
6121{
6122 NOREF(iSegReg); NOREF(fAccess);
6123 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
6124 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6125}
6126#endif
6127
6128/** \#GP(sel) - 0d. */
6129DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
6130{
6131 NOREF(Sel);
6132 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6133}
6134
6135#ifdef IEM_WITH_SETJMP
6136/** \#GP(sel) - 0d, longjmp. */
6137DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
6138{
6139 NOREF(Sel);
6140 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6141}
6142#endif
6143
6144
6145/** \#GP(sel) - 0d. */
6146DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
6147{
6148 NOREF(iSegReg); NOREF(fAccess);
6149 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6150}
6151
6152#ifdef IEM_WITH_SETJMP
6153/** \#GP(sel) - 0d, longjmp. */
6154DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
6155 uint32_t fAccess)
6156{
6157 NOREF(iSegReg); NOREF(fAccess);
6158 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6159}
6160#endif
6161
6162
6163/** \#PF(n) - 0e. */
6164DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6165{
6166 uint16_t uErr;
6167 switch (rc)
6168 {
6169 case VERR_PAGE_NOT_PRESENT:
6170 case VERR_PAGE_TABLE_NOT_PRESENT:
6171 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
6172 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
6173 uErr = 0;
6174 break;
6175
6176 default:
6177 AssertMsgFailed(("%Rrc\n", rc));
6178 RT_FALL_THRU();
6179 case VERR_ACCESS_DENIED:
6180 uErr = X86_TRAP_PF_P;
6181 break;
6182
6183 /** @todo reserved */
6184 }
6185
6186 if (pVCpu->iem.s.uCpl == 3)
6187 uErr |= X86_TRAP_PF_US;
6188
6189 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
6190 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6191 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
6192 uErr |= X86_TRAP_PF_ID;
6193
6194#if 0 /* This is so much non-sense, really. Why was it done like that? */
6195 /* Note! RW access callers reporting a WRITE protection fault, will clear
6196 the READ flag before calling. So, read-modify-write accesses (RW)
6197 can safely be reported as READ faults. */
6198 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
6199 uErr |= X86_TRAP_PF_RW;
6200#else
6201 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6202 {
6203 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
6204 /// (regardless of outcome of the comparison in the latter case).
6205 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
6206 uErr |= X86_TRAP_PF_RW;
6207 }
6208#endif
6209
6210 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
6211 uErr, GCPtrWhere);
6212}
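/*
 * Resulting error code, by example (informal): a ring-3 write to a present
 * but read-only page ends up with uErr = X86_TRAP_PF_P | X86_TRAP_PF_RW
 * | X86_TRAP_PF_US (0x7); a supervisor instruction fetch from a not-present
 * page with PAE+NXE enabled ends up with uErr = X86_TRAP_PF_ID (0x10).
 * A typical caller sketch, with GCPtrMem standing in for whatever guest
 * address failed translation:
 *
 *     int rc = ...; // translation status from the paging code
 *     if (RT_FAILURE(rc))
 *         return iemRaisePageFault(pVCpu, GCPtrMem, IEM_ACCESS_DATA_W, rc);
 */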
6213
6214#ifdef IEM_WITH_SETJMP
6215/** \#PF(n) - 0e, longjmp. */
6216IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6217{
6218 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
6219}
6220#endif
6221
6222
6223/** \#MF(0) - 10. */
6224DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
6225{
6226 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6227}
6228
6229
6230/** \#AC(0) - 11. */
6231DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
6232{
6233 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6234}
6235
6236
6237/**
6238 * Macro for calling iemCImplRaiseDivideError().
6239 *
6240 * This enables us to add/remove arguments and force different levels of
6241 * inlining as we wish.
6242 *
6243 * @return Strict VBox status code.
6244 */
6245#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6246IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6247{
6248 NOREF(cbInstr);
6249 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6250}
6251
6252
6253/**
6254 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6255 *
6256 * This enables us to add/remove arguments and force different levels of
6257 * inlining as we wish.
6258 *
6259 * @return Strict VBox status code.
6260 */
6261#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6262IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6263{
6264 NOREF(cbInstr);
6265 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6266}
6267
6268
6269/**
6270 * Macro for calling iemCImplRaiseInvalidOpcode().
6271 *
6272 * This enables us to add/remove arguments and force different levels of
6273 * inlining as we wish.
6274 *
6275 * @return Strict VBox status code.
6276 */
6277#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6278IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6279{
6280 NOREF(cbInstr);
6281 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6282}
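/*
 * Decoder usage sketch (informal): opcode functions normally fail through the
 * IEMOP_RAISE_* macros rather than calling iemRaiseXcptOrInt() directly, e.g.
 *
 *     if (fEncodingIsInvalidHere)   // stand-in for the actual decode-time check
 *         return IEMOP_RAISE_INVALID_OPCODE();
 *
 * which defers the work to the small C implementations above via
 * IEM_MC_DEFER_TO_CIMPL_0.
 */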
6283
6284
6285/** @} */
6286
6287
6288/*
6289 *
6290 * Helper routines.
6291 * Helper routines.
6292 * Helper routines.
6293 *
6294 */
6295
6296/**
6297 * Recalculates the effective operand size.
6298 *
6299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6300 */
6301IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
6302{
6303 switch (pVCpu->iem.s.enmCpuMode)
6304 {
6305 case IEMMODE_16BIT:
6306 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6307 break;
6308 case IEMMODE_32BIT:
6309 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6310 break;
6311 case IEMMODE_64BIT:
6312 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6313 {
6314 case 0:
6315 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6316 break;
6317 case IEM_OP_PRF_SIZE_OP:
6318 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6319 break;
6320 case IEM_OP_PRF_SIZE_REX_W:
6321 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6322 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6323 break;
6324 }
6325 break;
6326 default:
6327 AssertFailed();
6328 }
6329}
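/*
 * Worked example for the 64-bit case (informal): for an instruction such as
 * 66 48 89 c3 (operand-size prefix + REX.W + mov), both IEM_OP_PRF_SIZE_OP
 * and IEM_OP_PRF_SIZE_REX_W are set in fPrefixes and the switch yields
 * IEMMODE_64BIT, since REX.W takes precedence over the 66h prefix.  With only
 * the 66h prefix the result is IEMMODE_16BIT, and with neither prefix the
 * default operand size (normally IEMMODE_32BIT in 64-bit code) is used.
 */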
6330
6331
6332/**
6333 * Sets the default operand size to 64-bit and recalculates the effective
6334 * operand size.
6335 *
6336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6337 */
6338IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6339{
6340 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6341 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6342 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6343 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6344 else
6345 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6346}
6347
6348
6349/*
6350 *
6351 * Common opcode decoders.
6352 * Common opcode decoders.
6353 * Common opcode decoders.
6354 *
6355 */
6356//#include <iprt/mem.h>
6357
6358/**
6359 * Used to add extra details about a stub case.
6360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6361 */
6362IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6363{
6364#if defined(LOG_ENABLED) && defined(IN_RING3)
6365 PVM pVM = pVCpu->CTX_SUFF(pVM);
6366 char szRegs[4096];
6367 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6368 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6369 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6370 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6371 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6372 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6373 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6374 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6375 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6376 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6377 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6378 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6379 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6380 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6381 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6382 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6383 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6384 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6385 " efer=%016VR{efer}\n"
6386 " pat=%016VR{pat}\n"
6387 " sf_mask=%016VR{sf_mask}\n"
6388 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6389 " lstar=%016VR{lstar}\n"
6390 " star=%016VR{star} cstar=%016VR{cstar}\n"
6391 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6392 );
6393
6394 char szInstr[256];
6395 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6396 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6397 szInstr, sizeof(szInstr), NULL);
6398
6399 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6400#else
6401 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6402#endif
6403}
6404
6405/**
6406 * Complains about a stub.
6407 *
6408 * Two versions of this macro are provided: one for daily use and one for use
6409 * when working on IEM.
6410 */
6411#if 0
6412# define IEMOP_BITCH_ABOUT_STUB() \
6413 do { \
6414 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6415 iemOpStubMsg2(pVCpu); \
6416 RTAssertPanic(); \
6417 } while (0)
6418#else
6419# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6420#endif
6421
6422/** Stubs an opcode. */
6423#define FNIEMOP_STUB(a_Name) \
6424 FNIEMOP_DEF(a_Name) \
6425 { \
6426 RT_NOREF_PV(pVCpu); \
6427 IEMOP_BITCH_ABOUT_STUB(); \
6428 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6429 } \
6430 typedef int ignore_semicolon
6431
6432/** Stubs an opcode. */
6433#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6434 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6435 { \
6436 RT_NOREF_PV(pVCpu); \
6437 RT_NOREF_PV(a_Name0); \
6438 IEMOP_BITCH_ABOUT_STUB(); \
6439 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6440 } \
6441 typedef int ignore_semicolon
6442
6443/** Stubs an opcode which currently should raise \#UD. */
6444#define FNIEMOP_UD_STUB(a_Name) \
6445 FNIEMOP_DEF(a_Name) \
6446 { \
6447 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6448 return IEMOP_RAISE_INVALID_OPCODE(); \
6449 } \
6450 typedef int ignore_semicolon
6451
6452/** Stubs an opcode which currently should raise \#UD. */
6453#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6454 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6455 { \
6456 RT_NOREF_PV(pVCpu); \
6457 RT_NOREF_PV(a_Name0); \
6458 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6459 return IEMOP_RAISE_INVALID_OPCODE(); \
6460 } \
6461 typedef int ignore_semicolon
6462
6463
6464
6465/** @name Register Access.
6466 * @{
6467 */
6468
6469/**
6470 * Gets a reference (pointer) to the specified hidden segment register.
6471 *
6472 * @returns Hidden register reference.
6473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6474 * @param iSegReg The segment register.
6475 */
6476IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6477{
6478 Assert(iSegReg < X86_SREG_COUNT);
6479 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6480 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6481
6482 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6483 return pSReg;
6484}
6485
6486
6487/**
6488 * Ensures that the given hidden segment register is up to date.
6489 *
6490 * @returns Hidden register reference.
6491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6492 * @param pSReg The segment register.
6493 */
6494IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6495{
6496 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6497 NOREF(pVCpu);
6498 return pSReg;
6499}
6500
6501
6502/**
6503 * Gets a reference (pointer) to the specified segment register (the selector
6504 * value).
6505 *
6506 * @returns Pointer to the selector variable.
6507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6508 * @param iSegReg The segment register.
6509 */
6510DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6511{
6512 Assert(iSegReg < X86_SREG_COUNT);
6513 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6514 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6515}
6516
6517
6518/**
6519 * Fetches the selector value of a segment register.
6520 *
6521 * @returns The selector value.
6522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6523 * @param iSegReg The segment register.
6524 */
6525DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6526{
6527 Assert(iSegReg < X86_SREG_COUNT);
6528 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6529 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6530}
6531
6532
6533/**
6534 * Fetches the base address value of a segment register.
6535 *
6536 * @returns The segment base address.
6537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6538 * @param iSegReg The segment register.
6539 */
6540DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6541{
6542 Assert(iSegReg < X86_SREG_COUNT);
6543 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6544 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6545}
6546
6547
6548/**
6549 * Gets a reference (pointer) to the specified general purpose register.
6550 *
6551 * @returns Register reference.
6552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6553 * @param iReg The general purpose register.
6554 */
6555DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6556{
6557 Assert(iReg < 16);
6558 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6559}
6560
6561
6562/**
6563 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6564 *
6565 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6566 *
6567 * @returns Register reference.
6568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6569 * @param iReg The register.
6570 */
6571DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6572{
6573 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6574 {
6575 Assert(iReg < 16);
6576 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6577 }
6578 /* high 8-bit register. */
6579 Assert(iReg < 8);
6580 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6581}
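/*
 * Mapping recap (informal): without a REX prefix, iReg 0..3 are AL/CL/DL/BL
 * and iReg 4..7 are the high bytes AH/CH/DH/BH, hence the "& 3" and bHi in
 * the second path.  Once any REX prefix has been decoded, iReg 4..7 select
 * SPL/BPL/SIL/DIL and 8..15 select R8B..R15B, all of them low bytes:
 *
 *     uint8_t *pbAh  = iemGRegRefU8(pVCpu, 4);  // AH  if no REX prefix was seen
 *     uint8_t *pbSpl = iemGRegRefU8(pVCpu, 4);  // SPL if a REX prefix was seen
 */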
6582
6583
6584/**
6585 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6586 *
6587 * @returns Register reference.
6588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6589 * @param iReg The register.
6590 */
6591DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6592{
6593 Assert(iReg < 16);
6594 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6595}
6596
6597
6598/**
6599 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6600 *
6601 * @returns Register reference.
6602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6603 * @param iReg The register.
6604 */
6605DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6606{
6607 Assert(iReg < 16);
6608 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6609}
6610
6611
6612/**
6613 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6614 *
6615 * @returns Register reference.
6616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6617 * @param iReg The register.
6618 */
6619DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6620{
6621 Assert(iReg < 16);
6622 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6623}
6624
6625
6626/**
6627 * Gets a reference (pointer) to the specified segment register's base address.
6628 *
6629 * @returns Segment register base address reference.
6630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6631 * @param iSegReg The segment register.
6632 */
6633DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6634{
6635 Assert(iSegReg < X86_SREG_COUNT);
6636 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6637 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6638}
6639
6640
6641/**
6642 * Fetches the value of an 8-bit general purpose register.
6643 *
6644 * @returns The register value.
6645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6646 * @param iReg The register.
6647 */
6648DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6649{
6650 return *iemGRegRefU8(pVCpu, iReg);
6651}
6652
6653
6654/**
6655 * Fetches the value of a 16-bit general purpose register.
6656 *
6657 * @returns The register value.
6658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6659 * @param iReg The register.
6660 */
6661DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6662{
6663 Assert(iReg < 16);
6664 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6665}
6666
6667
6668/**
6669 * Fetches the value of a 32-bit general purpose register.
6670 *
6671 * @returns The register value.
6672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6673 * @param iReg The register.
6674 */
6675DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6676{
6677 Assert(iReg < 16);
6678 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6679}
6680
6681
6682/**
6683 * Fetches the value of a 64-bit general purpose register.
6684 *
6685 * @returns The register value.
6686 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6687 * @param iReg The register.
6688 */
6689DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6690{
6691 Assert(iReg < 16);
6692 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6693}
6694
6695
6696/**
6697 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6698 *
6699 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6700 * segment limit.
6701 *
6702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6703 * @param offNextInstr The offset of the next instruction.
6704 */
6705IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6706{
6707 switch (pVCpu->iem.s.enmEffOpSize)
6708 {
6709 case IEMMODE_16BIT:
6710 {
6711 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6712 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6713 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6714 return iemRaiseGeneralProtectionFault0(pVCpu);
6715 pVCpu->cpum.GstCtx.rip = uNewIp;
6716 break;
6717 }
6718
6719 case IEMMODE_32BIT:
6720 {
6721 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6722 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6723
6724 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6725 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6726 return iemRaiseGeneralProtectionFault0(pVCpu);
6727 pVCpu->cpum.GstCtx.rip = uNewEip;
6728 break;
6729 }
6730
6731 case IEMMODE_64BIT:
6732 {
6733 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6734
6735 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6736 if (!IEM_IS_CANONICAL(uNewRip))
6737 return iemRaiseGeneralProtectionFault0(pVCpu);
6738 pVCpu->cpum.GstCtx.rip = uNewRip;
6739 break;
6740 }
6741
6742 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6743 }
6744
6745 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6746
6747#ifndef IEM_WITH_CODE_TLB
6748 /* Flush the prefetch buffer. */
6749 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6750#endif
6751
6752 return VINF_SUCCESS;
6753}
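/*
 * Worked example for the 16-bit case (informal): the classic spin "jmp $"
 * encodes as EB FE, i.e. IEM_GET_INSTR_LEN() == 2 and offNextInstr == -2, so
 * uNewIp = ip + (-2) + 2 = ip and execution stays on the same instruction,
 * matching real hardware.
 */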
6754
6755
6756/**
6757 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6758 *
6759 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6760 * segment limit.
6761 *
6762 * @returns Strict VBox status code.
6763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6764 * @param offNextInstr The offset of the next instruction.
6765 */
6766IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
6767{
6768 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6769
6770 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6771 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6772 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6773 return iemRaiseGeneralProtectionFault0(pVCpu);
6774 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6775 pVCpu->cpum.GstCtx.rip = uNewIp;
6776 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6777
6778#ifndef IEM_WITH_CODE_TLB
6779 /* Flush the prefetch buffer. */
6780 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6781#endif
6782
6783 return VINF_SUCCESS;
6784}
6785
6786
6787/**
6788 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6789 *
6790 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6791 * segment limit.
6792 *
6793 * @returns Strict VBox status code.
6794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6795 * @param offNextInstr The offset of the next instruction.
6796 */
6797IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
6798{
6799 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6800
6801 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6802 {
6803 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6804
6805 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6806 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6807 return iemRaiseGeneralProtectionFault0(pVCpu);
6808 pVCpu->cpum.GstCtx.rip = uNewEip;
6809 }
6810 else
6811 {
6812 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6813
6814 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6815 if (!IEM_IS_CANONICAL(uNewRip))
6816 return iemRaiseGeneralProtectionFault0(pVCpu);
6817 pVCpu->cpum.GstCtx.rip = uNewRip;
6818 }
6819 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6820
6821#ifndef IEM_WITH_CODE_TLB
6822 /* Flush the prefetch buffer. */
6823 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6824#endif
6825
6826 return VINF_SUCCESS;
6827}
6828
6829
6830/**
6831 * Performs a near jump to the specified address.
6832 *
6833 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6834 * segment limit.
6835 *
6836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6837 * @param uNewRip The new RIP value.
6838 */
6839IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
6840{
6841 switch (pVCpu->iem.s.enmEffOpSize)
6842 {
6843 case IEMMODE_16BIT:
6844 {
6845 Assert(uNewRip <= UINT16_MAX);
6846 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6847 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6848 return iemRaiseGeneralProtectionFault0(pVCpu);
6849 /** @todo Test 16-bit jump in 64-bit mode. */
6850 pVCpu->cpum.GstCtx.rip = uNewRip;
6851 break;
6852 }
6853
6854 case IEMMODE_32BIT:
6855 {
6856 Assert(uNewRip <= UINT32_MAX);
6857 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6858 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6859
6860 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6861 return iemRaiseGeneralProtectionFault0(pVCpu);
6862 pVCpu->cpum.GstCtx.rip = uNewRip;
6863 break;
6864 }
6865
6866 case IEMMODE_64BIT:
6867 {
6868 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6869
6870 if (!IEM_IS_CANONICAL(uNewRip))
6871 return iemRaiseGeneralProtectionFault0(pVCpu);
6872 pVCpu->cpum.GstCtx.rip = uNewRip;
6873 break;
6874 }
6875
6876 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6877 }
6878
6879 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6880
6881#ifndef IEM_WITH_CODE_TLB
6882 /* Flush the prefetch buffer. */
6883 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6884#endif
6885
6886 return VINF_SUCCESS;
6887}
6888
6889
6890/**
6891 * Gets the address of the top of the stack.
6892 *
6893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6894 */
6895DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6896{
6897 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6898 return pVCpu->cpum.GstCtx.rsp;
6899 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6900 return pVCpu->cpum.GstCtx.esp;
6901 return pVCpu->cpum.GstCtx.sp;
6902}
6903
6904
6905/**
6906 * Updates the RIP/EIP/IP to point to the next instruction.
6907 *
6908 * This function leaves the EFLAGS.RF flag alone.
6909 *
6910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6911 * @param cbInstr The number of bytes to add.
6912 */
6913IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6914{
6915 switch (pVCpu->iem.s.enmCpuMode)
6916 {
6917 case IEMMODE_16BIT:
6918 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6919 pVCpu->cpum.GstCtx.eip += cbInstr;
6920 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6921 break;
6922
6923 case IEMMODE_32BIT:
6924 pVCpu->cpum.GstCtx.eip += cbInstr;
6925 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6926 break;
6927
6928 case IEMMODE_64BIT:
6929 pVCpu->cpum.GstCtx.rip += cbInstr;
6930 break;
6931 default: AssertFailed();
6932 }
6933}
6934
6935
6936#if 0
6937/**
6938 * Updates the RIP/EIP/IP to point to the next instruction.
6939 *
6940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6941 */
6942IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
6943{
6944 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6945}
6946#endif
6947
6948
6949
6950/**
6951 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6952 *
6953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6954 * @param cbInstr The number of bytes to add.
6955 */
6956IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6957{
6958 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6959
6960 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6961#if ARCH_BITS >= 64
6962 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6963 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6964 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6965#else
6966 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6967 pVCpu->cpum.GstCtx.rip += cbInstr;
6968 else
6969 pVCpu->cpum.GstCtx.eip += cbInstr;
6970#endif
6971}
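/*
 * The s_aRipMasks table above is simply a branchless encoding of the RIP
 * update width: 16-bit and 32-bit code (indexes 0 and 1) keep the result
 * within 32 bits, while 64-bit code (index 2) keeps the full 64-bit RIP.
 */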
6972
6973
6974/**
6975 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6976 *
6977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6978 */
6979IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
6980{
6981 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6982}
6983
6984
6985/**
6986 * Adds to the stack pointer.
6987 *
6988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6989 * @param cbToAdd The number of bytes to add (8-bit!).
6990 */
6991DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
6992{
6993 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6994 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6995 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6996 pVCpu->cpum.GstCtx.esp += cbToAdd;
6997 else
6998 pVCpu->cpum.GstCtx.sp += cbToAdd;
6999}
7000
7001
7002/**
7003 * Subtracts from the stack pointer.
7004 *
7005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7006 * @param cbToSub The number of bytes to subtract (8-bit!).
7007 */
7008DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
7009{
7010 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7011 pVCpu->cpum.GstCtx.rsp -= cbToSub;
7012 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7013 pVCpu->cpum.GstCtx.esp -= cbToSub;
7014 else
7015 pVCpu->cpum.GstCtx.sp -= cbToSub;
7016}
7017
7018
7019/**
7020 * Adds to the temporary stack pointer.
7021 *
7022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7023 * @param pTmpRsp The temporary SP/ESP/RSP to update.
7024 * @param cbToAdd The number of bytes to add (16-bit).
7025 */
7026DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
7027{
7028 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7029 pTmpRsp->u += cbToAdd;
7030 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7031 pTmpRsp->DWords.dw0 += cbToAdd;
7032 else
7033 pTmpRsp->Words.w0 += cbToAdd;
7034}
7035
7036
7037/**
7038 * Subtracts from the temporary stack pointer.
7039 *
7040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7041 * @param pTmpRsp The temporary SP/ESP/RSP to update.
7042 * @param cbToSub The number of bytes to subtract.
7043 * @remarks The @a cbToSub argument *MUST* be 16-bit, as iemCImpl_enter is
7044 * expecting that.
7045 */
7046DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
7047{
7048 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7049 pTmpRsp->u -= cbToSub;
7050 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7051 pTmpRsp->DWords.dw0 -= cbToSub;
7052 else
7053 pTmpRsp->Words.w0 -= cbToSub;
7054}
7055
7056
7057/**
7058 * Calculates the effective stack address for a push of the specified size as
7059 * well as the new RSP value (upper bits may be masked).
7060 *
7061 * @returns Effective stack address for the push.
7062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7063 * @param cbItem The size of the stack item to push.
7064 * @param puNewRsp Where to return the new RSP value.
7065 */
7066DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
7067{
7068 RTUINT64U uTmpRsp;
7069 RTGCPTR GCPtrTop;
7070 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
7071
7072 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7073 GCPtrTop = uTmpRsp.u -= cbItem;
7074 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7075 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
7076 else
7077 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
7078 *puNewRsp = uTmpRsp.u;
7079 return GCPtrTop;
7080}
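/*
 * Usage sketch (informal, assuming a flat 32-bit stack segment): a dword push
 * asks this helper where to store and what RSP will become, and commits the
 * new RSP only after the store succeeded:
 *
 *     uint64_t uNewRsp;
 *     RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(uint32_t), &uNewRsp);
 *     // ... write the 4 bytes at SS:GCPtrTop ...
 *     pVCpu->cpum.GstCtx.rsp = uNewRsp; // only on success
 */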
7081
7082
7083/**
7084 * Gets the current stack pointer and calculates the value after a pop of the
7085 * specified size.
7086 *
7087 * @returns Current stack pointer.
7088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7089 * @param cbItem The size of the stack item to pop.
7090 * @param puNewRsp Where to return the new RSP value.
7091 */
7092DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
7093{
7094 RTUINT64U uTmpRsp;
7095 RTGCPTR GCPtrTop;
7096 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
7097
7098 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7099 {
7100 GCPtrTop = uTmpRsp.u;
7101 uTmpRsp.u += cbItem;
7102 }
7103 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7104 {
7105 GCPtrTop = uTmpRsp.DWords.dw0;
7106 uTmpRsp.DWords.dw0 += cbItem;
7107 }
7108 else
7109 {
7110 GCPtrTop = uTmpRsp.Words.w0;
7111 uTmpRsp.Words.w0 += cbItem;
7112 }
7113 *puNewRsp = uTmpRsp.u;
7114 return GCPtrTop;
7115}
7116
7117
7118/**
7119 * Calculates the effective stack address for a push of the specified size as
7120 * well as the new temporary RSP value (upper bits may be masked).
7121 *
7122 * @returns Effective stack address for the push.
7123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7124 * @param pTmpRsp The temporary stack pointer. This is updated.
7125 * @param cbItem The size of the stack item to push.
7126 */
7127DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
7128{
7129 RTGCPTR GCPtrTop;
7130
7131 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7132 GCPtrTop = pTmpRsp->u -= cbItem;
7133 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7134 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
7135 else
7136 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
7137 return GCPtrTop;
7138}
7139
7140
7141/**
7142 * Gets the effective stack address for a pop of the specified size and
7143 * calculates and updates the temporary RSP.
7144 *
7145 * @returns Current stack pointer.
7146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7147 * @param pTmpRsp The temporary stack pointer. This is updated.
7148 * @param cbItem The size of the stack item to pop.
7149 */
7150DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
7151{
7152 RTGCPTR GCPtrTop;
7153 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7154 {
7155 GCPtrTop = pTmpRsp->u;
7156 pTmpRsp->u += cbItem;
7157 }
7158 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7159 {
7160 GCPtrTop = pTmpRsp->DWords.dw0;
7161 pTmpRsp->DWords.dw0 += cbItem;
7162 }
7163 else
7164 {
7165 GCPtrTop = pTmpRsp->Words.w0;
7166 pTmpRsp->Words.w0 += cbItem;
7167 }
7168 return GCPtrTop;
7169}
7170
7171/** @} */
7172
7173
7174/** @name FPU access and helpers.
7175 *
7176 * @{
7177 */
7178
7179
7180/**
7181 * Hook for preparing to use the host FPU.
7182 *
7183 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7184 *
7185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7186 */
7187DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
7188{
7189#ifdef IN_RING3
7190 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7191#else
7192 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
7193#endif
7194 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7195}
7196
7197
7198/**
7199 * Hook for preparing to use the host FPU for SSE.
7200 *
7201 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7202 *
7203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7204 */
7205DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
7206{
7207 iemFpuPrepareUsage(pVCpu);
7208}
7209
7210
7211/**
7212 * Hook for preparing to use the host FPU for AVX.
7213 *
7214 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7215 *
7216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7217 */
7218DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
7219{
7220 iemFpuPrepareUsage(pVCpu);
7221}
7222
7223
7224/**
7225 * Hook for actualizing the guest FPU state before the interpreter reads it.
7226 *
7227 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7228 *
7229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7230 */
7231DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
7232{
7233#ifdef IN_RING3
7234 NOREF(pVCpu);
7235#else
7236 CPUMRZFpuStateActualizeForRead(pVCpu);
7237#endif
7238 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7239}
7240
7241
7242/**
7243 * Hook for actualizing the guest FPU state before the interpreter changes it.
7244 *
7245 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7246 *
7247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7248 */
7249DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
7250{
7251#ifdef IN_RING3
7252 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7253#else
7254 CPUMRZFpuStateActualizeForChange(pVCpu);
7255#endif
7256 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7257}
7258
7259
7260/**
7261 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7262 * only.
7263 *
7264 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7265 *
7266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7267 */
7268DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
7269{
7270#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7271 NOREF(pVCpu);
7272#else
7273 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7274#endif
7275 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7276}
7277
7278
7279/**
7280 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7281 * read+write.
7282 *
7283 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7284 *
7285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7286 */
7287DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
7288{
7289#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7290 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7291#else
7292 CPUMRZFpuStateActualizeForChange(pVCpu);
7293#endif
7294 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7295
7296 /* Make sure any changes are loaded the next time around. */
7297 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
7298}
7299
7300
7301/**
7302 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7303 * only.
7304 *
7305 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7306 *
7307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7308 */
7309DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7310{
7311#ifdef IN_RING3
7312 NOREF(pVCpu);
7313#else
7314 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7315#endif
7316 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7317}
7318
7319
7320/**
7321 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7322 * read+write.
7323 *
7324 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7325 *
7326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7327 */
7328DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7329{
7330#ifdef IN_RING3
7331 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7332#else
7333 CPUMRZFpuStateActualizeForChange(pVCpu);
7334#endif
7335 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7336
7337 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
7338 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
7339}
7340
7341
7342/**
7343 * Stores a QNaN value into a FPU register.
7344 *
7345 * @param pReg Pointer to the register.
7346 */
7347DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7348{
7349 pReg->au32[0] = UINT32_C(0x00000000);
7350 pReg->au32[1] = UINT32_C(0xc0000000);
7351 pReg->au16[4] = UINT16_C(0xffff);
7352}
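/*
 * The pattern stored above is the 80-bit "real indefinite" QNaN: sign=1,
 * exponent=0x7fff, significand=0xc000000000000000, i.e. the value the FPU
 * itself produces for masked invalid-operation responses.
 */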
7353
7354
7355/**
7356 * Updates the FOP, FPU.CS and FPUIP registers.
7357 *
7358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7359 * @param pFpuCtx The FPU context.
7360 */
7361DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7362{
7363 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7364 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7365 /** @todo x87.CS and FPUIP need to be kept separately. */
7366 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7367 {
7368 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7369 * happens in real mode here based on the fnsave and fnstenv images. */
7370 pFpuCtx->CS = 0;
7371 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7372 }
7373 else if (!IEM_IS_LONG_MODE(pVCpu))
7374 {
7375 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7376 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7377 }
7378 else
7379 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7380}
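/*
 * Example of the real/V86 mode encoding above (informal): with CS=0x1234 and
 * EIP=0x0010, FPUIP becomes 0x0010 | (0x1234 << 4) = 0x12350, roughly the
 * 20-bit linear address a real-mode fnsave/fnstenv image encodes, while the
 * saved CS field is left at zero (see the testcase todo above).
 */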
7381
7382
7383/**
7384 * Updates the x87.DS and FPUDP registers.
7385 *
7386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7387 * @param pFpuCtx The FPU context.
7388 * @param iEffSeg The effective segment register.
7389 * @param GCPtrEff The effective address relative to @a iEffSeg.
7390 */
7391DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7392{
7393 RTSEL sel;
7394 switch (iEffSeg)
7395 {
7396 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7397 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7398 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7399 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7400 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7401 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7402 default:
7403 AssertMsgFailed(("%d\n", iEffSeg));
7404 sel = pVCpu->cpum.GstCtx.ds.Sel;
7405 }
7406 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7407 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7408 {
7409 pFpuCtx->DS = 0;
7410 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7411 }
7412 else if (!IEM_IS_LONG_MODE(pVCpu))
7413 {
7414 pFpuCtx->DS = sel;
7415 pFpuCtx->FPUDP = GCPtrEff;
7416 }
7417 else
7418 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
7419}
7420
7421
7422/**
7423 * Rotates the stack registers in the push direction.
7424 *
7425 * @param pFpuCtx The FPU context.
7426 * @remarks This is a complete waste of time, but fxsave stores the registers in
7427 * stack order.
7428 */
7429DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7430{
7431 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7432 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7433 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7434 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7435 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7436 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7437 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7438 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7439 pFpuCtx->aRegs[0].r80 = r80Tmp;
7440}
7441
7442
7443/**
7444 * Rotates the stack registers in the pop direction.
7445 *
7446 * @param pFpuCtx The FPU context.
7447 * @remarks This is a complete waste of time, but fxsave stores the registers in
7448 * stack order.
7449 */
7450DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7451{
7452 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7453 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7454 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7455 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7456 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7457 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7458 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7459 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7460 pFpuCtx->aRegs[7].r80 = r80Tmp;
7461}
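/*
 * Both rotation helpers exist because aRegs[] mirrors the FXSAVE layout,
 * which is ST(i)-relative rather than physical: keeping aRegs[0] == ST(0)
 * after every TOP change lets the rest of the code index the stack directly
 * by ST number, while FTW (see iemFpuStoreResultOnly) stays physically
 * indexed.
 */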
7462
7463
7464/**
7465 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7466 * exception prevents it.
7467 *
7468 * @param pResult The FPU operation result to push.
7469 * @param pFpuCtx The FPU context.
7470 */
7471IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7472{
7473 /* Update FSW and bail if there are pending exceptions afterwards. */
7474 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7475 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7476 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7477 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7478 {
7479 pFpuCtx->FSW = fFsw;
7480 return;
7481 }
7482
7483 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7484 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7485 {
7486 /* All is fine, push the actual value. */
7487 pFpuCtx->FTW |= RT_BIT(iNewTop);
7488 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7489 }
7490 else if (pFpuCtx->FCW & X86_FCW_IM)
7491 {
7492 /* Masked stack overflow, push QNaN. */
7493 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7494 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7495 }
7496 else
7497 {
7498 /* Raise stack overflow, don't push anything. */
7499 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7500 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7501 return;
7502 }
7503
7504 fFsw &= ~X86_FSW_TOP_MASK;
7505 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7506 pFpuCtx->FSW = fFsw;
7507
7508 iemFpuRotateStackPush(pFpuCtx);
7509}
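/*
 * On the TOP arithmetic above (informal): the x87 stack grows downwards
 * through the register file, so a push computes
 *
 *     iNewTop = (TOP + 7) & 7;   // i.e. TOP - 1 modulo 8
 *
 * and the value written to aRegs[7] becomes the new ST(0) once
 * iemFpuRotateStackPush() has rotated the array into the new stack order.
 */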
7510
7511
7512/**
7513 * Stores a result in a FPU register and updates the FSW and FTW.
7514 *
7515 * @param pFpuCtx The FPU context.
7516 * @param pResult The result to store.
7517 * @param iStReg Which FPU register to store it in.
7518 */
7519IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7520{
7521 Assert(iStReg < 8);
7522 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7523 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7524 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7525 pFpuCtx->FTW |= RT_BIT(iReg);
7526 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7527}
7528
7529
7530/**
7531 * Only updates the FPU status word (FSW) with the result of the current
7532 * instruction.
7533 *
7534 * @param pFpuCtx The FPU context.
7535 * @param u16FSW The FSW output of the current instruction.
7536 */
7537IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7538{
7539 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7540 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7541}
7542
7543
7544/**
7545 * Pops one item off the FPU stack if no pending exception prevents it.
7546 *
7547 * @param pFpuCtx The FPU context.
7548 */
7549IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7550{
7551 /* Check pending exceptions. */
7552 uint16_t uFSW = pFpuCtx->FSW;
7553 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7554 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7555 return;
7556
7557 /* TOP++ (pop). */
7558 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7559 uFSW &= ~X86_FSW_TOP_MASK;
7560 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7561 pFpuCtx->FSW = uFSW;
7562
7563 /* Mark the previous ST0 as empty. */
7564 iOldTop >>= X86_FSW_TOP_SHIFT;
7565 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7566
7567 /* Rotate the registers. */
7568 iemFpuRotateStackPop(pFpuCtx);
7569}
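/*
 * The "+ 9" above is just "+ 1" modulo the 3-bit TOP field (9 & 7 == 1):
 * popping increments TOP, the old ST(0) slot is marked empty in FTW, and the
 * register array is rotated so that aRegs[0] again corresponds to ST(0).
 */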
7570
7571
7572/**
7573 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7574 *
7575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7576 * @param pResult The FPU operation result to push.
7577 */
7578IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7579{
7580 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7581 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7582 iemFpuMaybePushResult(pResult, pFpuCtx);
7583}
7584
7585
7586/**
7587 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7588 * and sets FPUDP and FPUDS.
7589 *
7590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7591 * @param pResult The FPU operation result to push.
7592 * @param iEffSeg The effective segment register.
7593 * @param GCPtrEff The effective address relative to @a iEffSeg.
7594 */
7595IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7596{
7597 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7598 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7599 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7600 iemFpuMaybePushResult(pResult, pFpuCtx);
7601}
7602
7603
7604/**
7605 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7606 * unless a pending exception prevents it.
7607 *
7608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7609 * @param pResult The FPU operation result to store and push.
7610 */
7611IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7612{
7613 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7614 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7615
7616 /* Update FSW and bail if there are pending exceptions afterwards. */
7617 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7618 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7619 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7620 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7621 {
7622 pFpuCtx->FSW = fFsw;
7623 return;
7624 }
7625
7626 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7627 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7628 {
7629 /* All is fine, push the actual value. */
7630 pFpuCtx->FTW |= RT_BIT(iNewTop);
7631 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7632 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7633 }
7634 else if (pFpuCtx->FCW & X86_FCW_IM)
7635 {
7636 /* Masked stack overflow, push QNaN. */
7637 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7638 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7639 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7640 }
7641 else
7642 {
7643 /* Raise stack overflow, don't push anything. */
7644 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7645 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7646 return;
7647 }
7648
7649 fFsw &= ~X86_FSW_TOP_MASK;
7650 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7651 pFpuCtx->FSW = fFsw;
7652
7653 iemFpuRotateStackPush(pFpuCtx);
7654}
7655
7656
7657/**
7658 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7659 * FOP.
7660 *
7661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7662 * @param pResult The result to store.
7663 * @param iStReg Which FPU register to store it in.
7664 */
7665IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7666{
7667 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7668 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7669 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7670}
7671
7672
7673/**
7674 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7675 * FOP, and then pops the stack.
7676 *
7677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7678 * @param pResult The result to store.
7679 * @param iStReg Which FPU register to store it in.
7680 */
7681IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7682{
7683 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7684 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7685 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7686 iemFpuMaybePopOne(pFpuCtx);
7687}
7688
7689
7690/**
7691 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7692 * FPUDP, and FPUDS.
7693 *
7694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7695 * @param pResult The result to store.
7696 * @param iStReg Which FPU register to store it in.
7697 * @param iEffSeg The effective memory operand selector register.
7698 * @param GCPtrEff The effective memory operand offset.
7699 */
7700IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7701 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7702{
7703 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7704 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7705 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7706 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7707}
7708
7709
7710/**
7711 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7712 * FPUDP, and FPUDS, and then pops the stack.
7713 *
7714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7715 * @param pResult The result to store.
7716 * @param iStReg Which FPU register to store it in.
7717 * @param iEffSeg The effective memory operand selector register.
7718 * @param GCPtrEff The effective memory operand offset.
7719 */
7720IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7721 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7722{
7723 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7724 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7725 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7726 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7727 iemFpuMaybePopOne(pFpuCtx);
7728}
7729
7730
7731/**
7732 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7733 *
7734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7735 */
7736IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7737{
7738 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7739 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7740}
7741
7742
7743/**
7744 * Marks the specified stack register as free (for FFREE).
7745 *
7746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7747 * @param iStReg The register to free.
7748 */
7749IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7750{
7751 Assert(iStReg < 8);
7752 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7753 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7754 pFpuCtx->FTW &= ~RT_BIT(iReg);
7755}
7756
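/* Editor's note (worked example, not from the original source): ST(i) is
 * relative to FSW.TOP, so with TOP=3 an FFREE ST(2) computes iReg = (3 + 2) & 7
 * = 5 and clears FTW bit 5, marking physical register 5 as empty without
 * touching TOP or the register contents. */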
7757
7758/**
7759 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7760 *
7761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7762 */
7763IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
7764{
7765 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7766 uint16_t uFsw = pFpuCtx->FSW;
7767 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7768 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7769 uFsw &= ~X86_FSW_TOP_MASK;
7770 uFsw |= uTop;
7771 pFpuCtx->FSW = uFsw;
7772}
7773
7774
7775/**
7776 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7777 *
7778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7779 */
7780IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
7781{
7782 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7783 uint16_t uFsw = pFpuCtx->FSW;
7784 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7785 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7786 uFsw &= ~X86_FSW_TOP_MASK;
7787 uFsw |= uTop;
7788 pFpuCtx->FSW = uFsw;
7789}
7790
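/* Editor's note (illustrative): FSW.TOP is a 3-bit field, so the +1 / +7
 * arithmetic above wraps modulo 8. With TOP=0, iemFpuStackIncTop ("pop")
 * yields TOP=1, while iemFpuStackDecTop ("push") yields TOP=7 since
 * (0 + 7) & 7 = 7, i.e. the stack grows towards lower TOP values. */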
7791
7792/**
7793 * Updates the FSW, FOP, FPUIP, and FPUCS.
7794 *
7795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7796 * @param u16FSW The FSW from the current instruction.
7797 */
7798IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
7799{
7800 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7801 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7802 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7803}
7804
7805
7806/**
7807 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7808 *
7809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7810 * @param u16FSW The FSW from the current instruction.
7811 */
7812IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7813{
7814 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7815 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7816 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7817 iemFpuMaybePopOne(pFpuCtx);
7818}
7819
7820
7821/**
7822 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7823 *
7824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7825 * @param u16FSW The FSW from the current instruction.
7826 * @param iEffSeg The effective memory operand selector register.
7827 * @param GCPtrEff The effective memory operand offset.
7828 */
7829IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7830{
7831 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7832 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7833 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7834 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7835}
7836
7837
7838/**
7839 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7840 *
7841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7842 * @param u16FSW The FSW from the current instruction.
7843 */
7844IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7845{
7846 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7847 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7848 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7849 iemFpuMaybePopOne(pFpuCtx);
7850 iemFpuMaybePopOne(pFpuCtx);
7851}
7852
7853
7854/**
7855 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7856 *
7857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7858 * @param u16FSW The FSW from the current instruction.
7859 * @param iEffSeg The effective memory operand selector register.
7860 * @param GCPtrEff The effective memory operand offset.
7861 */
7862IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7863{
7864 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7865 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7866 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7867 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7868 iemFpuMaybePopOne(pFpuCtx);
7869}
7870
7871
7872/**
7873 * Worker routine for raising an FPU stack underflow exception.
7874 *
7875 * @param pFpuCtx The FPU context.
7876 * @param iStReg The stack register being accessed.
7877 */
7878IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7879{
7880 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7881 if (pFpuCtx->FCW & X86_FCW_IM)
7882 {
7883 /* Masked underflow. */
7884 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7885 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7886 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7887 if (iStReg != UINT8_MAX)
7888 {
7889 pFpuCtx->FTW |= RT_BIT(iReg);
7890 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7891 }
7892 }
7893 else
7894 {
7895 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7896 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7897 }
7898}
7899
7900
7901/**
7902 * Raises an FPU stack underflow exception.
7903 *
7904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7905 * @param iStReg The destination register that should be loaded
7906 * with QNaN if \#IS is masked. Specify
7907 * UINT8_MAX if none (like for fcom).
7908 */
7909DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
7910{
7911 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7912 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7913 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7914}
7915
7916
7917DECL_NO_INLINE(IEM_STATIC, void)
7918iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7919{
7920 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7921 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7922 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7923 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7924}
7925
7926
7927DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
7928{
7929 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7930 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7931 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7932 iemFpuMaybePopOne(pFpuCtx);
7933}
7934
7935
7936DECL_NO_INLINE(IEM_STATIC, void)
7937iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7938{
7939 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7940 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7941 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7942 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7943 iemFpuMaybePopOne(pFpuCtx);
7944}
7945
7946
7947DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
7948{
7949 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7950 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7951 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7952 iemFpuMaybePopOne(pFpuCtx);
7953 iemFpuMaybePopOne(pFpuCtx);
7954}
7955
7956
7957DECL_NO_INLINE(IEM_STATIC, void)
7958iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
7959{
7960 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7961 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7962
7963 if (pFpuCtx->FCW & X86_FCW_IM)
7964 {
7965 /* Masked underflow - Push QNaN. */
7966 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7967 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7968 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7969 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7970 pFpuCtx->FTW |= RT_BIT(iNewTop);
7971 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7972 iemFpuRotateStackPush(pFpuCtx);
7973 }
7974 else
7975 {
7976 /* Exception pending - don't change TOP or the register stack. */
7977 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7978 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7979 }
7980}
7981
7982
7983DECL_NO_INLINE(IEM_STATIC, void)
7984iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
7985{
7986 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7987 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7988
7989 if (pFpuCtx->FCW & X86_FCW_IM)
7990 {
7991 /* Masked underflow - Push QNaNs. */
7992 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7993 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7994 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7995 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7996 pFpuCtx->FTW |= RT_BIT(iNewTop);
7997 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7998 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7999 iemFpuRotateStackPush(pFpuCtx);
8000 }
8001 else
8002 {
8003 /* Exception pending - don't change TOP or the register stack. */
8004 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8005 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8006 }
8007}
8008
8009
8010/**
8011 * Worker routine for raising an FPU stack overflow exception on a push.
8012 *
8013 * @param pFpuCtx The FPU context.
8014 */
8015IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
8016{
8017 if (pFpuCtx->FCW & X86_FCW_IM)
8018 {
8019 /* Masked overflow. */
8020 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
8021 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
8022 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
8023 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
8024 pFpuCtx->FTW |= RT_BIT(iNewTop);
8025 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
8026 iemFpuRotateStackPush(pFpuCtx);
8027 }
8028 else
8029 {
8030 /* Exception pending - don't change TOP or the register stack. */
8031 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8032 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8033 }
8034}
8035
8036
8037/**
8038 * Raises an FPU stack overflow exception on a push.
8039 *
8040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8041 */
8042DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
8043{
8044 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8045 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8046 iemFpuStackPushOverflowOnly(pFpuCtx);
8047}
8048
8049
8050/**
8051 * Raises an FPU stack overflow exception on a push with a memory operand.
8052 *
8053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8054 * @param iEffSeg The effective memory operand selector register.
8055 * @param GCPtrEff The effective memory operand offset.
8056 */
8057DECL_NO_INLINE(IEM_STATIC, void)
8058iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
8059{
8060 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8061 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
8062 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8063 iemFpuStackPushOverflowOnly(pFpuCtx);
8064}
8065
8066
8067IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
8068{
8069 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8070 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
8071 if (pFpuCtx->FTW & RT_BIT(iReg))
8072 return VINF_SUCCESS;
8073 return VERR_NOT_FOUND;
8074}
8075
8076
8077IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
8078{
8079 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8080 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
8081 if (pFpuCtx->FTW & RT_BIT(iReg))
8082 {
8083 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
8084 return VINF_SUCCESS;
8085 }
8086 return VERR_NOT_FOUND;
8087}
8088
8089
8090IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
8091 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
8092{
8093 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8094 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
8095 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
8096 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
8097 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
8098 {
8099 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
8100 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
8101 return VINF_SUCCESS;
8102 }
8103 return VERR_NOT_FOUND;
8104}
8105
8106
8107IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
8108{
8109 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8110 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
8111 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
8112 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
8113 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
8114 {
8115 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
8116 return VINF_SUCCESS;
8117 }
8118 return VERR_NOT_FOUND;
8119}
8120
8121
8122/**
8123 * Updates the FPU exception status after FCW is changed.
8124 *
8125 * @param pFpuCtx The FPU context.
8126 */
8127IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
8128{
8129 uint16_t u16Fsw = pFpuCtx->FSW;
8130 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
8131 u16Fsw |= X86_FSW_ES | X86_FSW_B;
8132 else
8133 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
8134 pFpuCtx->FSW = u16Fsw;
8135}
8136
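/* Editor's note (illustrative): when an instruction changes FCW (FLDCW,
 * FRSTOR and the like), an exception that is pending in FSW but newly
 * unmasked in FCW re-asserts the summary bits: e.g. with X86_FSW_IE set and
 * the new FCW clearing X86_FCW_IM, the expression above is non-zero and ES
 * and B get set; masking the exception again clears them. */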
8137
8138/**
8139 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
8140 *
8141 * @returns The full FTW.
8142 * @param pFpuCtx The FPU context.
8143 */
8144IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
8145{
8146 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
8147 uint16_t u16Ftw = 0;
8148 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
8149 for (unsigned iSt = 0; iSt < 8; iSt++)
8150 {
8151 unsigned const iReg = (iSt + iTop) & 7;
8152 if (!(u8Ftw & RT_BIT(iReg)))
8153 u16Ftw |= 3 << (iReg * 2); /* empty */
8154 else
8155 {
8156 uint16_t uTag;
8157 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
8158 if (pr80Reg->s.uExponent == 0x7fff)
8159 uTag = 2; /* Exponent is all 1's => Special. */
8160 else if (pr80Reg->s.uExponent == 0x0000)
8161 {
8162 if (pr80Reg->s.u64Mantissa == 0x0000)
8163 uTag = 1; /* All bits are zero => Zero. */
8164 else
8165 uTag = 2; /* Must be special. */
8166 }
8167 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
8168 uTag = 0; /* Valid. */
8169 else
8170 uTag = 2; /* Must be special. */
8171
8172 u16Ftw |= uTag << (iReg * 2);
8173 }
8174 }
8175
8176 return u16Ftw;
8177}
8178
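/* Worked example (editor's addition): the encodings are 0=valid, 1=zero,
 * 2=special and 3=empty, two bits per register. With TOP=6 and only ST(0)
 * holding a normal value (J bit set), the compressed FTW has just bit 6 set,
 * and the loop above yields uTag=0 for register 6 and 3 for the rest,
 * i.e. a full FTW of 0xCFFF (binary 11 00 11 11 11 11 11 11, reg7..reg0). */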
8179
8180/**
8181 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
8182 *
8183 * @returns The compressed FTW.
8184 * @param u16FullFtw The full FTW to convert.
8185 */
8186IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
8187{
8188 uint8_t u8Ftw = 0;
8189 for (unsigned i = 0; i < 8; i++)
8190 {
8191 if ((u16FullFtw & 3) != 3 /*empty*/)
8192 u8Ftw |= RT_BIT(i);
8193 u16FullFtw >>= 2;
8194 }
8195
8196 return u8Ftw;
8197}
8198
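/* Worked example (editor's addition): compressing the full FTW from the note
 * above, 0xCFFF, walks the eight 2-bit fields from reg0 upwards; only field 6
 * differs from 3 (empty), so the result is RT_BIT(6) = 0x40, i.e. the abridged
 * one-bit-per-register format kept in pFpuCtx->FTW. */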
8199/** @} */
8200
8201
8202/** @name Memory access.
8203 *
8204 * @{
8205 */
8206
8207
8208/**
8209 * Updates the IEMCPU::cbWritten counter if applicable.
8210 *
8211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8212 * @param fAccess The access being accounted for.
8213 * @param cbMem The access size.
8214 */
8215DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
8216{
8217 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
8218 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
8219 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
8220}
8221
8222
8223/**
8224 * Checks if the given segment can be written to, raising the appropriate
8225 * exception if not.
8226 *
8227 * @returns VBox strict status code.
8228 *
8229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8230 * @param pHid Pointer to the hidden register.
8231 * @param iSegReg The register number.
8232 * @param pu64BaseAddr Where to return the base address to use for the
8233 * segment. (In 64-bit code it may differ from the
8234 * base in the hidden segment.)
8235 */
8236IEM_STATIC VBOXSTRICTRC
8237iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8238{
8239 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8240
8241 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8242 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8243 else
8244 {
8245 if (!pHid->Attr.n.u1Present)
8246 {
8247 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8248 AssertRelease(uSel == 0);
8249 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8250 return iemRaiseGeneralProtectionFault0(pVCpu);
8251 }
8252
8253 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8254 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8255 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8256 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8257 *pu64BaseAddr = pHid->u64Base;
8258 }
8259 return VINF_SUCCESS;
8260}
8261
8262
8263/**
8264 * Checks if the given segment can be read from, raising the appropriate
8265 * exception if not.
8266 *
8267 * @returns VBox strict status code.
8268 *
8269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8270 * @param pHid Pointer to the hidden register.
8271 * @param iSegReg The register number.
8272 * @param pu64BaseAddr Where to return the base address to use for the
8273 * segment. (In 64-bit code it may differ from the
8274 * base in the hidden segment.)
8275 */
8276IEM_STATIC VBOXSTRICTRC
8277iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8278{
8279 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8280
8281 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8282 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8283 else
8284 {
8285 if (!pHid->Attr.n.u1Present)
8286 {
8287 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8288 AssertRelease(uSel == 0);
8289 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8290 return iemRaiseGeneralProtectionFault0(pVCpu);
8291 }
8292
8293 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8294 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8295 *pu64BaseAddr = pHid->u64Base;
8296 }
8297 return VINF_SUCCESS;
8298}
8299
8300
8301/**
8302 * Applies the segment limit, base and attributes.
8303 *
8304 * This may raise a \#GP or \#SS.
8305 *
8306 * @returns VBox strict status code.
8307 *
8308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8309 * @param fAccess The kind of access which is being performed.
8310 * @param iSegReg The index of the segment register to apply.
8311 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8312 * TSS, ++).
8313 * @param cbMem The access size.
8314 * @param pGCPtrMem Pointer to the guest memory address to apply
8315 * segmentation to. Input and output parameter.
8316 */
8317IEM_STATIC VBOXSTRICTRC
8318iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8319{
8320 if (iSegReg == UINT8_MAX)
8321 return VINF_SUCCESS;
8322
8323 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8324 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8325 switch (pVCpu->iem.s.enmCpuMode)
8326 {
8327 case IEMMODE_16BIT:
8328 case IEMMODE_32BIT:
8329 {
8330 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8331 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8332
8333 if ( pSel->Attr.n.u1Present
8334 && !pSel->Attr.n.u1Unusable)
8335 {
8336 Assert(pSel->Attr.n.u1DescType);
8337 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8338 {
8339 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8340 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8341 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8342
8343 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8344 {
8345 /** @todo CPL check. */
8346 }
8347
8348 /*
8349 * There are two kinds of data selectors, normal and expand down.
8350 */
8351 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8352 {
8353 if ( GCPtrFirst32 > pSel->u32Limit
8354 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8355 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8356 }
8357 else
8358 {
8359 /*
8360 * The upper boundary is defined by the B bit, not the G bit!
8361 */
8362 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8363 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8364 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8365 }
8366 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8367 }
8368 else
8369 {
8370
8371 /*
8372 * Code selectors can usually be used to read through; writing is
8373 * only permitted in real and V8086 mode.
8374 */
8375 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8376 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8377 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8378 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8379 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8380
8381 if ( GCPtrFirst32 > pSel->u32Limit
8382 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8383 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8384
8385 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8386 {
8387 /** @todo CPL check. */
8388 }
8389
8390 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8391 }
8392 }
8393 else
8394 return iemRaiseGeneralProtectionFault0(pVCpu);
8395 return VINF_SUCCESS;
8396 }
8397
8398 case IEMMODE_64BIT:
8399 {
8400 RTGCPTR GCPtrMem = *pGCPtrMem;
8401 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8402 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8403
8404 Assert(cbMem >= 1);
8405 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8406 return VINF_SUCCESS;
8407 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8408 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8409 return iemRaiseGeneralProtectionFault0(pVCpu);
8410 }
8411
8412 default:
8413 AssertFailedReturn(VERR_IEM_IPE_7);
8414 }
8415}
8416
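/* Editor's note (illustrative): for an expand-down data segment the check
 * above accepts offsets in (u32Limit, upper bound], where the upper bound is
 * 0xffffffff when the B bit (u1DefBig) is set and 0xffff otherwise. E.g. with
 * u32Limit=0x1000 and B=1, a dword access at 0x0800 raises the bounds fault
 * while one at 0x2000 passes, the opposite of a normal data segment with the
 * same limit. */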
8417
8418/**
8419 * Translates a virtual address to a physical address and checks if we
8420 * can access the page as specified.
8421 *
8422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8423 * @param GCPtrMem The virtual address.
8424 * @param fAccess The intended access.
8425 * @param pGCPhysMem Where to return the physical address.
8426 */
8427IEM_STATIC VBOXSTRICTRC
8428iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8429{
8430 /** @todo Need a different PGM interface here. We're currently using
8431 * generic / REM interfaces. this won't cut it for R0. */
8432 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8433 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
8434 * here. */
8435 PGMPTWALK Walk;
8436 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
8437 if (RT_FAILURE(rc))
8438 {
8439 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8440 /** @todo Check unassigned memory in unpaged mode. */
8441 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8442#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8443 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8444 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
8445#endif
8446 *pGCPhysMem = NIL_RTGCPHYS;
8447 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8448 }
8449
8450 /* If the page is writable, user accessible and does not have the no-exec bit
8451 set, all access is allowed. Otherwise we'll have to check more carefully... */
8452 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8453 {
8454 /* Write to read only memory? */
8455 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8456 && !(Walk.fEffective & X86_PTE_RW)
8457 && ( ( pVCpu->iem.s.uCpl == 3
8458 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8459 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8460 {
8461 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8462 *pGCPhysMem = NIL_RTGCPHYS;
8463#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8464 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8465 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
8466#endif
8467 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8468 }
8469
8470 /* Kernel memory accessed by userland? */
8471 if ( !(Walk.fEffective & X86_PTE_US)
8472 && pVCpu->iem.s.uCpl == 3
8473 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8474 {
8475 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8476 *pGCPhysMem = NIL_RTGCPHYS;
8477#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8478 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8479 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
8480#endif
8481 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8482 }
8483
8484 /* Executing non-executable memory? */
8485 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8486 && (Walk.fEffective & X86_PTE_PAE_NX)
8487 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8488 {
8489 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8490 *pGCPhysMem = NIL_RTGCPHYS;
8491#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8492 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8493 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
8494#endif
8495 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8496 VERR_ACCESS_DENIED);
8497 }
8498 }
8499
8500 /*
8501 * Set the dirty / access flags.
8502 * ASSUMES this is set when the address is translated rather than on commit...
8503 */
8504 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8505 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8506 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
8507 {
8508 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8509 AssertRC(rc2);
8510 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
8511 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
8512 }
8513
8514 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
8515 *pGCPhysMem = GCPhys;
8516 return VINF_SUCCESS;
8517}
8518
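/* Editor's note (illustrative): the write check above follows the usual x86
 * rule - a CPL-3 (non-system) write to a read-only page always faults, while
 * a supervisor write to it only faults when CR0.WP is set. */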
8519
8520
8521/**
8522 * Maps a physical page.
8523 *
8524 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8526 * @param GCPhysMem The physical address.
8527 * @param fAccess The intended access.
8528 * @param ppvMem Where to return the mapping address.
8529 * @param pLock The PGM lock.
8530 */
8531IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8532{
8533#ifdef IEM_LOG_MEMORY_WRITES
8534 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8535 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8536#endif
8537
8538 /** @todo This API may require some improving later. A private deal with PGM
8539 * regarding locking and unlocking needs to be struck. A couple of TLBs
8540 * living in PGM, but with publicly accessible inlined access methods
8541 * could perhaps be an even better solution. */
8542 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8543 GCPhysMem,
8544 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8545 pVCpu->iem.s.fBypassHandlers,
8546 ppvMem,
8547 pLock);
8548 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8549 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8550
8551 return rc;
8552}
8553
8554
8555/**
8556 * Unmaps a page previously mapped by iemMemPageMap.
8557 *
8558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8559 * @param GCPhysMem The physical address.
8560 * @param fAccess The intended access.
8561 * @param pvMem What iemMemPageMap returned.
8562 * @param pLock The PGM lock.
8563 */
8564DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8565{
8566 NOREF(pVCpu);
8567 NOREF(GCPhysMem);
8568 NOREF(fAccess);
8569 NOREF(pvMem);
8570 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8571}
8572
8573
8574/**
8575 * Looks up a memory mapping entry.
8576 *
8577 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8579 * @param pvMem The memory address.
8580 * @param fAccess The access flags (type and purpose) to match.
8581 */
8582DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8583{
8584 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8585 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8586 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8587 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8588 return 0;
8589 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8590 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8591 return 1;
8592 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8593 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8594 return 2;
8595 return VERR_NOT_FOUND;
8596}
8597
8598
8599/**
8600 * Finds a free memmap entry when using iNextMapping doesn't work.
8601 *
8602 * @returns Memory mapping index, 1024 on failure.
8603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8604 */
8605IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8606{
8607 /*
8608 * The easy case.
8609 */
8610 if (pVCpu->iem.s.cActiveMappings == 0)
8611 {
8612 pVCpu->iem.s.iNextMapping = 1;
8613 return 0;
8614 }
8615
8616 /* There should be enough mappings for all instructions. */
8617 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8618
8619 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8620 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8621 return i;
8622
8623 AssertFailedReturn(1024);
8624}
8625
8626
8627/**
8628 * Commits a bounce buffer that needs writing back and unmaps it.
8629 *
8630 * @returns Strict VBox status code.
8631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8632 * @param iMemMap The index of the buffer to commit.
8633 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8634 * Always false in ring-3, obviously.
8635 */
8636IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8637{
8638 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8639 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8640#ifdef IN_RING3
8641 Assert(!fPostponeFail);
8642 RT_NOREF_PV(fPostponeFail);
8643#endif
8644
8645 /*
8646 * Do the writing.
8647 */
8648 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8649 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8650 {
8651 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8652 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8653 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8654 if (!pVCpu->iem.s.fBypassHandlers)
8655 {
8656 /*
8657 * Carefully and efficiently dealing with access handler return
8658 * codes makes this a little bloated.
8659 */
8660 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8661 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8662 pbBuf,
8663 cbFirst,
8664 PGMACCESSORIGIN_IEM);
8665 if (rcStrict == VINF_SUCCESS)
8666 {
8667 if (cbSecond)
8668 {
8669 rcStrict = PGMPhysWrite(pVM,
8670 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8671 pbBuf + cbFirst,
8672 cbSecond,
8673 PGMACCESSORIGIN_IEM);
8674 if (rcStrict == VINF_SUCCESS)
8675 { /* nothing */ }
8676 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8677 {
8678 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8679 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8680 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8681 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8682 }
8683#ifndef IN_RING3
8684 else if (fPostponeFail)
8685 {
8686 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8687 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8688 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8689 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8690 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8691 return iemSetPassUpStatus(pVCpu, rcStrict);
8692 }
8693#endif
8694 else
8695 {
8696 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8697 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8698 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8699 return rcStrict;
8700 }
8701 }
8702 }
8703 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8704 {
8705 if (!cbSecond)
8706 {
8707 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8708 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8709 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8710 }
8711 else
8712 {
8713 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8714 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8715 pbBuf + cbFirst,
8716 cbSecond,
8717 PGMACCESSORIGIN_IEM);
8718 if (rcStrict2 == VINF_SUCCESS)
8719 {
8720 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8721 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8722 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8723 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8724 }
8725 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8726 {
8727 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8728 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8729 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8730 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8731 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8732 }
8733#ifndef IN_RING3
8734 else if (fPostponeFail)
8735 {
8736 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8737 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8738 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8739 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8740 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8741 return iemSetPassUpStatus(pVCpu, rcStrict);
8742 }
8743#endif
8744 else
8745 {
8746 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8747 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8748 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8749 return rcStrict2;
8750 }
8751 }
8752 }
8753#ifndef IN_RING3
8754 else if (fPostponeFail)
8755 {
8756 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8757 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8758 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8759 if (!cbSecond)
8760 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8761 else
8762 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8763 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8764 return iemSetPassUpStatus(pVCpu, rcStrict);
8765 }
8766#endif
8767 else
8768 {
8769 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8770 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8771 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8772 return rcStrict;
8773 }
8774 }
8775 else
8776 {
8777 /*
8778 * No access handlers, much simpler.
8779 */
8780 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8781 if (RT_SUCCESS(rc))
8782 {
8783 if (cbSecond)
8784 {
8785 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8786 if (RT_SUCCESS(rc))
8787 { /* likely */ }
8788 else
8789 {
8790 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8791 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8792 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8793 return rc;
8794 }
8795 }
8796 }
8797 else
8798 {
8799 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8800 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8801 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8802 return rc;
8803 }
8804 }
8805 }
8806
8807#if defined(IEM_LOG_MEMORY_WRITES)
8808 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8809 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8810 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8811 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8812 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8813 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8814
8815 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8816 g_cbIemWrote = cbWrote;
8817 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8818#endif
8819
8820 /*
8821 * Free the mapping entry.
8822 */
8823 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8824 Assert(pVCpu->iem.s.cActiveMappings != 0);
8825 pVCpu->iem.s.cActiveMappings--;
8826 return VINF_SUCCESS;
8827}
8828
8829
8830/**
8831 * iemMemMap worker that deals with a request crossing pages.
8832 */
8833IEM_STATIC VBOXSTRICTRC
8834iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8835{
8836 /*
8837 * Do the address translations.
8838 */
8839 RTGCPHYS GCPhysFirst;
8840 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8841 if (rcStrict != VINF_SUCCESS)
8842 return rcStrict;
8843
8844 RTGCPHYS GCPhysSecond;
8845 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
8846 fAccess, &GCPhysSecond);
8847 if (rcStrict != VINF_SUCCESS)
8848 return rcStrict;
8849 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8850
8851 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8852
8853 /*
8854 * Read in the current memory content if it's a read, execute or partial
8855 * write access.
8856 */
8857 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8858 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
8859 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8860
8861 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8862 {
8863 if (!pVCpu->iem.s.fBypassHandlers)
8864 {
8865 /*
8866 * Must carefully deal with access handler status codes here,
8867 * which makes the code a bit bloated.
8868 */
8869 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8870 if (rcStrict == VINF_SUCCESS)
8871 {
8872 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8873 if (rcStrict == VINF_SUCCESS)
8874 { /*likely */ }
8875 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8876 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8877 else
8878 {
8879 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8880 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8881 return rcStrict;
8882 }
8883 }
8884 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8885 {
8886 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8887 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8888 {
8889 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8890 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8891 }
8892 else
8893 {
8894 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8895 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8896 return rcStrict2;
8897 }
8898 }
8899 else
8900 {
8901 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8902 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8903 return rcStrict;
8904 }
8905 }
8906 else
8907 {
8908 /*
8909 * No informational status codes here, much more straight forward.
8910 */
8911 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8912 if (RT_SUCCESS(rc))
8913 {
8914 Assert(rc == VINF_SUCCESS);
8915 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8916 if (RT_SUCCESS(rc))
8917 Assert(rc == VINF_SUCCESS);
8918 else
8919 {
8920 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8921 return rc;
8922 }
8923 }
8924 else
8925 {
8926 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8927 return rc;
8928 }
8929 }
8930 }
8931#ifdef VBOX_STRICT
8932 else
8933 memset(pbBuf, 0xcc, cbMem);
8934 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8935 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8936#endif
8937
8938 /*
8939 * Commit the bounce buffer entry.
8940 */
8941 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8942 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8943 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8944 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8945 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8946 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8947 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8948 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8949 pVCpu->iem.s.cActiveMappings++;
8950
8951 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8952 *ppvMem = pbBuf;
8953 return VINF_SUCCESS;
8954}
8955
8956
8957/**
8958 * iemMemMap worker that deals with iemMemPageMap failures.
8959 */
8960IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8961 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8962{
8963 /*
8964 * Filter out conditions we can handle and the ones which shouldn't happen.
8965 */
8966 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8967 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8968 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8969 {
8970 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8971 return rcMap;
8972 }
8973 pVCpu->iem.s.cPotentialExits++;
8974
8975 /*
8976 * Read in the current memory content if it's a read, execute or partial
8977 * write access.
8978 */
8979 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8980 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8981 {
8982 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8983 memset(pbBuf, 0xff, cbMem);
8984 else
8985 {
8986 int rc;
8987 if (!pVCpu->iem.s.fBypassHandlers)
8988 {
8989 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8990 if (rcStrict == VINF_SUCCESS)
8991 { /* nothing */ }
8992 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8993 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8994 else
8995 {
8996 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8997 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8998 return rcStrict;
8999 }
9000 }
9001 else
9002 {
9003 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
9004 if (RT_SUCCESS(rc))
9005 { /* likely */ }
9006 else
9007 {
9008 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
9009 GCPhysFirst, rc));
9010 return rc;
9011 }
9012 }
9013 }
9014 }
9015#ifdef VBOX_STRICT
9016 else
9017 memset(pbBuf, 0xcc, cbMem);
9020 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
9021 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
9022#endif
9023
9024 /*
9025 * Commit the bounce buffer entry.
9026 */
9027 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
9028 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
9029 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
9030 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
9031 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
9032 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
9033 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
9034 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9035 pVCpu->iem.s.cActiveMappings++;
9036
9037 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9038 *ppvMem = pbBuf;
9039 return VINF_SUCCESS;
9040}
9041
9042
9043
9044/**
9045 * Maps the specified guest memory for the given kind of access.
9046 *
9047 * This may be using bounce buffering of the memory if it's crossing a page
9048 * boundary or if there is an access handler installed for any of it. Because
9049 * of lock prefix guarantees, we're in for some extra clutter when this
9050 * happens.
9051 *
9052 * This may raise a \#GP, \#SS, \#PF or \#AC.
9053 *
9054 * @returns VBox strict status code.
9055 *
9056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9057 * @param ppvMem Where to return the pointer to the mapped
9058 * memory.
9059 * @param cbMem The number of bytes to map. This is usually 1,
9060 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
9061 * string operations it can be up to a page.
9062 * @param iSegReg The index of the segment register to use for
9063 * this access. The base and limits are checked.
9064 * Use UINT8_MAX to indicate that no segmentation
9065 * is required (for IDT, GDT and LDT accesses).
9066 * @param GCPtrMem The address of the guest memory.
9067 * @param fAccess How the memory is being accessed. The
9068 * IEM_ACCESS_TYPE_XXX bit is used to figure out
9069 * how to map the memory, while the
9070 * IEM_ACCESS_WHAT_XXX bit is used when raising
9071 * exceptions.
9072 */
9073IEM_STATIC VBOXSTRICTRC
9074iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
9075{
9076 /*
9077 * Check the input and figure out which mapping entry to use.
9078 */
9079 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
9080 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
9081 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
9082
9083 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
9084 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
9085 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
9086 {
9087 iMemMap = iemMemMapFindFree(pVCpu);
9088 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9089 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9090 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9091 pVCpu->iem.s.aMemMappings[2].fAccess),
9092 VERR_IEM_IPE_9);
9093 }
9094
9095 /*
9096 * Map the memory, checking that we can actually access it. If something
9097 * slightly complicated happens, fall back on bounce buffering.
9098 */
9099 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9100 if (rcStrict != VINF_SUCCESS)
9101 return rcStrict;
9102
9103 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem > GUEST_PAGE_SIZE) /* Crossing a page boundary? */
9104 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
9105
9106 RTGCPHYS GCPhysFirst;
9107 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9108 if (rcStrict != VINF_SUCCESS)
9109 return rcStrict;
9110
9111 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9112 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9113 if (fAccess & IEM_ACCESS_TYPE_READ)
9114 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9115
9116 void *pvMem;
9117 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9118 if (rcStrict != VINF_SUCCESS)
9119 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9120
9121 /*
9122 * Fill in the mapping table entry.
9123 */
9124 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9125 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9126 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9127 pVCpu->iem.s.cActiveMappings++;
9128
9129 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9130 *ppvMem = pvMem;
9131
9132 return VINF_SUCCESS;
9133}
9134
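/* Usage sketch (editor's addition; the DS segment, GCPtrEffDst and u32Value
 * are illustrative only): a typical dword write maps, modifies and commits the
 * memory so that bounce buffering and the lock-prefix guarantees are honoured:
 *
 *     uint32_t    *pu32Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                       X86_SREG_DS, GCPtrEffDst, IEM_ACCESS_DATA_W);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32Dst = u32Value;
 *         rcStrict  = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *     }
 *     return rcStrict;
 */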
9135
9136/**
9137 * Commits the guest memory if bounce buffered and unmaps it.
9138 *
9139 * @returns Strict VBox status code.
9140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9141 * @param pvMem The mapping.
9142 * @param fAccess The kind of access.
9143 */
9144IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9145{
9146 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9147 AssertReturn(iMemMap >= 0, iMemMap);
9148
9149 /* If it's bounce buffered, we may need to write back the buffer. */
9150 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9151 {
9152 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9153 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9154 }
9155 /* Otherwise unlock it. */
9156 else
9157 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9158
9159 /* Free the entry. */
9160 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9161 Assert(pVCpu->iem.s.cActiveMappings != 0);
9162 pVCpu->iem.s.cActiveMappings--;
9163 return VINF_SUCCESS;
9164}
9165
9166#ifdef IEM_WITH_SETJMP
9167
9168/**
9169 * Maps the specified guest memory for the given kind of access, longjmp on
9170 * error.
9171 *
9172 * This may be using bounce buffering of the memory if it's crossing a page
9173 * boundary or if there is an access handler installed for any of it. Because
9174 * of lock prefix guarantees, we're in for some extra clutter when this
9175 * happens.
9176 *
9177 * This may raise a \#GP, \#SS, \#PF or \#AC.
9178 *
9179 * @returns Pointer to the mapped memory.
9180 *
9181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9182 * @param cbMem The number of bytes to map. This is usually 1,
9183 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
9184 * string operations it can be up to a page.
9185 * @param iSegReg The index of the segment register to use for
9186 * this access. The base and limits are checked.
9187 * Use UINT8_MAX to indicate that no segmentation
9188 * is required (for IDT, GDT and LDT accesses).
9189 * @param GCPtrMem The address of the guest memory.
9190 * @param fAccess How the memory is being accessed. The
9191 * IEM_ACCESS_TYPE_XXX bit is used to figure out
9192 * how to map the memory, while the
9193 * IEM_ACCESS_WHAT_XXX bit is used when raising
9194 * exceptions.
9195 */
9196IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
9197{
9198 /*
9199 * Check the input and figure out which mapping entry to use.
9200 */
9201 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
9202 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
9203 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
9204
9205 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
9206 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
9207 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
9208 {
9209 iMemMap = iemMemMapFindFree(pVCpu);
9210 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9211 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9212 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9213 pVCpu->iem.s.aMemMappings[2].fAccess),
9214 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9215 }
9216
9217 /*
9218 * Map the memory, checking that we can actually access it. If something
9219 * slightly complicated happens, fall back on bounce buffering.
9220 */
9221 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9222 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9223 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9224
9225 /* Crossing a page boundary? */
9226 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
9227 { /* No (likely). */ }
9228 else
9229 {
9230 void *pvMem;
9231 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9232 if (rcStrict == VINF_SUCCESS)
9233 return pvMem;
9234 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9235 }
9236
9237 RTGCPHYS GCPhysFirst;
9238 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9239 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9240 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9241
9242 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9243 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9244 if (fAccess & IEM_ACCESS_TYPE_READ)
9245 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9246
9247 void *pvMem;
9248 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9249 if (rcStrict == VINF_SUCCESS)
9250 { /* likely */ }
9251 else
9252 {
9253 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9254 if (rcStrict == VINF_SUCCESS)
9255 return pvMem;
9256 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9257 }
9258
9259 /*
9260 * Fill in the mapping table entry.
9261 */
9262 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9263 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9264 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9265 pVCpu->iem.s.cActiveMappings++;
9266
9267 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9268 return pvMem;
9269}
9270
9271
9272/**
9273 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9274 *
9275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9276 * @param pvMem The mapping.
9277 * @param fAccess The kind of access.
9278 */
9279IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9280{
9281 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9282 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9283
9284 /* If it's bounce buffered, we may need to write back the buffer. */
9285 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9286 {
9287 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9288 {
9289 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9290 if (rcStrict == VINF_SUCCESS)
9291 return;
9292 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9293 }
9294 }
9295 /* Otherwise unlock it. */
9296 else
9297 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9298
9299 /* Free the entry. */
9300 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9301 Assert(pVCpu->iem.s.cActiveMappings != 0);
9302 pVCpu->iem.s.cActiveMappings--;
9303}
9304
9305#endif /* IEM_WITH_SETJMP */
9306
9307#ifndef IN_RING3
9308/**
9309 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9310 * buffer part shows trouble, the commit is postponed to ring-3 (sets FF and stuff).
9311 *
9312 * Allows the instruction to be completed and retired, while the IEM user will
9313 * return to ring-3 immediately afterwards and do the postponed writes there.
9314 *
9315 * @returns VBox status code (no strict statuses). Caller must check
9316 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9318 * @param pvMem The mapping.
9319 * @param fAccess The kind of access.
9320 */
9321IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9322{
9323 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9324 AssertReturn(iMemMap >= 0, iMemMap);
9325
9326 /* If it's bounce buffered, we may need to write back the buffer. */
9327 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9328 {
9329 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9330 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9331 }
9332 /* Otherwise unlock it. */
9333 else
9334 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9335
9336 /* Free the entry. */
9337 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9338 Assert(pVCpu->iem.s.cActiveMappings != 0);
9339 pVCpu->iem.s.cActiveMappings--;
9340 return VINF_SUCCESS;
9341}
9342#endif
9343
9344
9345/**
9346 * Rolls back mappings, releasing page locks and such.
9347 *
9348 * The caller shall only call this after checking cActiveMappings.
9349 *
9351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9352 */
9353IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9354{
9355 Assert(pVCpu->iem.s.cActiveMappings > 0);
9356
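   /* Walk all mapping entries and invalidate any active ones, releasing the
      PGM page mapping lock for those that weren't bounce buffered. */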
9357 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9358 while (iMemMap-- > 0)
9359 {
9360 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9361 if (fAccess != IEM_ACCESS_INVALID)
9362 {
9363 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9364 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9365 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9366 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9367 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9368 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9369 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9370 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9371 pVCpu->iem.s.cActiveMappings--;
9372 }
9373 }
9374}
9375
9376
9377/**
9378 * Fetches a data byte.
9379 *
9380 * @returns Strict VBox status code.
9381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9382 * @param pu8Dst Where to return the byte.
9383 * @param iSegReg The index of the segment register to use for
9384 * this access. The base and limits are checked.
9385 * @param GCPtrMem The address of the guest memory.
9386 */
9387IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9388{
9389 /* The lazy approach for now... */
9390 uint8_t const *pu8Src;
9391 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9392 if (rc == VINF_SUCCESS)
9393 {
9394 *pu8Dst = *pu8Src;
9395 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9396 }
9397 return rc;
9398}
9399
9400
9401#ifdef IEM_WITH_SETJMP
9402/**
9403 * Fetches a data byte, longjmp on error.
9404 *
9405 * @returns The byte.
9406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9407 * @param iSegReg The index of the segment register to use for
9408 * this access. The base and limits are checked.
9409 * @param GCPtrMem The address of the guest memory.
9410 */
9411DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9412{
9413 /* The lazy approach for now... */
9414 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9415 uint8_t const bRet = *pu8Src;
9416 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9417 return bRet;
9418}
9419#endif /* IEM_WITH_SETJMP */
9420
9421
9422/**
9423 * Fetches a data word.
9424 *
9425 * @returns Strict VBox status code.
9426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9427 * @param pu16Dst Where to return the word.
9428 * @param iSegReg The index of the segment register to use for
9429 * this access. The base and limits are checked.
9430 * @param GCPtrMem The address of the guest memory.
9431 */
9432IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9433{
9434 /* The lazy approach for now... */
9435 uint16_t const *pu16Src;
9436 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9437 if (rc == VINF_SUCCESS)
9438 {
9439 *pu16Dst = *pu16Src;
9440 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9441 }
9442 return rc;
9443}
9444
9445
9446#ifdef IEM_WITH_SETJMP
9447/**
9448 * Fetches a data word, longjmp on error.
9449 *
9450 * @returns The word
9451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9452 * @param iSegReg The index of the segment register to use for
9453 * this access. The base and limits are checked.
9454 * @param GCPtrMem The address of the guest memory.
9455 */
9456DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9457{
9458 /* The lazy approach for now... */
9459 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9460 uint16_t const u16Ret = *pu16Src;
9461 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9462 return u16Ret;
9463}
9464#endif
9465
9466
9467/**
9468 * Fetches a data dword.
9469 *
9470 * @returns Strict VBox status code.
9471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9472 * @param pu32Dst Where to return the dword.
9473 * @param iSegReg The index of the segment register to use for
9474 * this access. The base and limits are checked.
9475 * @param GCPtrMem The address of the guest memory.
9476 */
9477IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9478{
9479 /* The lazy approach for now... */
9480 uint32_t const *pu32Src;
9481 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9482 if (rc == VINF_SUCCESS)
9483 {
9484 *pu32Dst = *pu32Src;
9485 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9486 }
9487 return rc;
9488}
9489
9490
9491/**
9492 * Fetches a data dword and zero extends it to a qword.
9493 *
9494 * @returns Strict VBox status code.
9495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9496 * @param pu64Dst Where to return the qword.
9497 * @param iSegReg The index of the segment register to use for
9498 * this access. The base and limits are checked.
9499 * @param GCPtrMem The address of the guest memory.
9500 */
9501IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9502{
9503 /* The lazy approach for now... */
9504 uint32_t const *pu32Src;
9505 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9506 if (rc == VINF_SUCCESS)
9507 {
9508 *pu64Dst = *pu32Src;
9509 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9510 }
9511 return rc;
9512}
9513
9514
9515#ifdef IEM_WITH_SETJMP
9516
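/**
 * Applies segmentation to a read access, longjmp on error.
 *
 * Adds the segment base and checks the limit and attributes, raising \#GP(0)
 * or the appropriate selector bounds/access fault via longjmp if the checks fail.
 *
 * @returns The flat (linear) address to use for the access.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iSegReg The index of the segment register to use for
 * this access. The base and limits are checked.
 * @param cbMem The number of bytes to access.
 * @param GCPtrMem The segmented address of the guest memory.
 */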
9517IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9518{
9519 Assert(cbMem >= 1);
9520 Assert(iSegReg < X86_SREG_COUNT);
9521
9522 /*
9523 * 64-bit mode is simpler.
9524 */
9525 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9526 {
9527 if (iSegReg >= X86_SREG_FS)
9528 {
9529 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9530 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9531 GCPtrMem += pSel->u64Base;
9532 }
9533
9534 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9535 return GCPtrMem;
9536 }
9537 /*
9538 * 16-bit and 32-bit segmentation.
9539 */
9540 else
9541 {
9542 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9543 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9544 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9545 == X86DESCATTR_P /* data, expand up */
9546 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9547 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9548 {
9549 /* expand up */
9550 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9551 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9552 && GCPtrLast32 > (uint32_t)GCPtrMem))
9553 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9554 }
9555 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9556 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9557 {
9558 /* expand down */
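         /* For expand-down segments the valid offsets are (limit, 0xffff] or
            (limit, 0xffffffff], depending on the D/B bit. */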
9559 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9560 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9561 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9562 && GCPtrLast32 > (uint32_t)GCPtrMem))
9563 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9564 }
9565 else
9566 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9567 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9568 }
9569 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9570}
9571
9572
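/**
 * Applies segmentation to a write access, longjmp on error.
 *
 * Adds the segment base and checks the limit and attributes (the segment must
 * be writable data), raising \#GP(0) or the appropriate selector bounds/access
 * fault via longjmp if the checks fail.
 *
 * @returns The flat (linear) address to use for the access.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iSegReg The index of the segment register to use for
 * this access. The base and limits are checked.
 * @param cbMem The number of bytes to access.
 * @param GCPtrMem The segmented address of the guest memory.
 */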
9573IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9574{
9575 Assert(cbMem >= 1);
9576 Assert(iSegReg < X86_SREG_COUNT);
9577
9578 /*
9579 * 64-bit mode is simpler.
9580 */
9581 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9582 {
9583 if (iSegReg >= X86_SREG_FS)
9584 {
9585 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9586 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9587 GCPtrMem += pSel->u64Base;
9588 }
9589
9590 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9591 return GCPtrMem;
9592 }
9593 /*
9594 * 16-bit and 32-bit segmentation.
9595 */
9596 else
9597 {
9598 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9599 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9600 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9601 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9602 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9603 {
9604 /* expand up */
9605 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9606 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9607 && GCPtrLast32 > (uint32_t)GCPtrMem))
9608 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9609 }
9610 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9611 {
9612 /* expand down */
9613 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9614 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9615 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9616 && GCPtrLast32 > (uint32_t)GCPtrMem))
9617 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9618 }
9619 else
9620 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9621 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9622 }
9623 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9624}
9625
9626
9627/**
9628 * Fetches a data dword, longjmp on error, fallback/safe version.
9629 *
9630 * @returns The dword
9631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9632 * @param iSegReg The index of the segment register to use for
9633 * this access. The base and limits are checked.
9634 * @param GCPtrMem The address of the guest memory.
9635 */
9636IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9637{
9638 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9639 uint32_t const u32Ret = *pu32Src;
9640 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9641 return u32Ret;
9642}
9643
9644
9645/**
9646 * Fetches a data dword, longjmp on error.
9647 *
9648 * @returns The dword
9649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9650 * @param iSegReg The index of the segment register to use for
9651 * this access. The base and limits are checked.
9652 * @param GCPtrMem The address of the guest memory.
9653 */
9654DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9655{
9656# ifdef IEM_WITH_DATA_TLB
9657 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9658 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9659 {
9660 /// @todo more later.
9661 }
9662
9663 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9664# else
9665 /* The lazy approach. */
9666 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9667 uint32_t const u32Ret = *pu32Src;
9668 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9669 return u32Ret;
9670# endif
9671}
9672#endif
9673
9674
9675#ifdef SOME_UNUSED_FUNCTION
9676/**
9677 * Fetches a data dword and sign extends it to a qword.
9678 *
9679 * @returns Strict VBox status code.
9680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9681 * @param pu64Dst Where to return the sign extended value.
9682 * @param iSegReg The index of the segment register to use for
9683 * this access. The base and limits are checked.
9684 * @param GCPtrMem The address of the guest memory.
9685 */
9686IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9687{
9688 /* The lazy approach for now... */
9689 int32_t const *pi32Src;
9690 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9691 if (rc == VINF_SUCCESS)
9692 {
9693 *pu64Dst = *pi32Src;
9694 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9695 }
9696#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9697 else
9698 *pu64Dst = 0;
9699#endif
9700 return rc;
9701}
9702#endif
9703
9704
9705/**
9706 * Fetches a data qword.
9707 *
9708 * @returns Strict VBox status code.
9709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9710 * @param pu64Dst Where to return the qword.
9711 * @param iSegReg The index of the segment register to use for
9712 * this access. The base and limits are checked.
9713 * @param GCPtrMem The address of the guest memory.
9714 */
9715IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9716{
9717 /* The lazy approach for now... */
9718 uint64_t const *pu64Src;
9719 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9720 if (rc == VINF_SUCCESS)
9721 {
9722 *pu64Dst = *pu64Src;
9723 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9724 }
9725 return rc;
9726}
9727
9728
9729#ifdef IEM_WITH_SETJMP
9730/**
9731 * Fetches a data qword, longjmp on error.
9732 *
9733 * @returns The qword.
9734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9735 * @param iSegReg The index of the segment register to use for
9736 * this access. The base and limits are checked.
9737 * @param GCPtrMem The address of the guest memory.
9738 */
9739DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9740{
9741 /* The lazy approach for now... */
9742 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9743 uint64_t const u64Ret = *pu64Src;
9744 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9745 return u64Ret;
9746}
9747#endif
9748
9749
9750/**
9751 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9752 *
9753 * @returns Strict VBox status code.
9754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9755 * @param pu64Dst Where to return the qword.
9756 * @param iSegReg The index of the segment register to use for
9757 * this access. The base and limits are checked.
9758 * @param GCPtrMem The address of the guest memory.
9759 */
9760IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9761{
9762 /* The lazy approach for now... */
9763 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9764 if (RT_UNLIKELY(GCPtrMem & 15))
9765 return iemRaiseGeneralProtectionFault0(pVCpu);
9766
9767 uint64_t const *pu64Src;
9768 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9769 if (rc == VINF_SUCCESS)
9770 {
9771 *pu64Dst = *pu64Src;
9772 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9773 }
9774 return rc;
9775}
9776
9777
9778#ifdef IEM_WITH_SETJMP
9779/**
9780 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9781 *
9782 * @returns The qword.
9783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9784 * @param iSegReg The index of the segment register to use for
9785 * this access. The base and limits are checked.
9786 * @param GCPtrMem The address of the guest memory.
9787 */
9788DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9789{
9790 /* The lazy approach for now... */
9791 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9792 if (RT_LIKELY(!(GCPtrMem & 15)))
9793 {
9794 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9795 uint64_t const u64Ret = *pu64Src;
9796 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9797 return u64Ret;
9798 }
9799
9800 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9801 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9802}
9803#endif
9804
9805
9806/**
9807 * Fetches a data tword.
9808 *
9809 * @returns Strict VBox status code.
9810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9811 * @param pr80Dst Where to return the tword.
9812 * @param iSegReg The index of the segment register to use for
9813 * this access. The base and limits are checked.
9814 * @param GCPtrMem The address of the guest memory.
9815 */
9816IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9817{
9818 /* The lazy approach for now... */
9819 PCRTFLOAT80U pr80Src;
9820 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9821 if (rc == VINF_SUCCESS)
9822 {
9823 *pr80Dst = *pr80Src;
9824 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9825 }
9826 return rc;
9827}
9828
9829
9830#ifdef IEM_WITH_SETJMP
9831/**
9832 * Fetches a data tword, longjmp on error.
9833 *
9834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9835 * @param pr80Dst Where to return the tword.
9836 * @param iSegReg The index of the segment register to use for
9837 * this access. The base and limits are checked.
9838 * @param GCPtrMem The address of the guest memory.
9839 */
9840DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9841{
9842 /* The lazy approach for now... */
9843 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9844 *pr80Dst = *pr80Src;
9845 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9846}
9847#endif
9848
9849
9850/**
9851 * Fetches a data dqword (double qword), generally SSE related.
9852 *
9853 * @returns Strict VBox status code.
9854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9855 * @param pu128Dst Where to return the dqword.
9856 * @param iSegReg The index of the segment register to use for
9857 * this access. The base and limits are checked.
9858 * @param GCPtrMem The address of the guest memory.
9859 */
9860IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9861{
9862 /* The lazy approach for now... */
9863 PCRTUINT128U pu128Src;
9864 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9865 if (rc == VINF_SUCCESS)
9866 {
9867 pu128Dst->au64[0] = pu128Src->au64[0];
9868 pu128Dst->au64[1] = pu128Src->au64[1];
9869 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9870 }
9871 return rc;
9872}
9873
9874
9875#ifdef IEM_WITH_SETJMP
9876/**
9877 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9878 *
9879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9880 * @param pu128Dst Where to return the dqword.
9881 * @param iSegReg The index of the segment register to use for
9882 * this access. The base and limits are checked.
9883 * @param GCPtrMem The address of the guest memory.
9884 */
9885IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9886{
9887 /* The lazy approach for now... */
9888 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9889 pu128Dst->au64[0] = pu128Src->au64[0];
9890 pu128Dst->au64[1] = pu128Src->au64[1];
9891 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9892}
9893#endif
9894
9895
9896/**
9897 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9898 * related.
9899 *
9900 * Raises \#GP(0) if not aligned.
9901 *
9902 * @returns Strict VBox status code.
9903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9904 * @param pu128Dst Where to return the dqword.
9905 * @param iSegReg The index of the segment register to use for
9906 * this access. The base and limits are checked.
9907 * @param GCPtrMem The address of the guest memory.
9908 */
9909IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9910{
9911 /* The lazy approach for now... */
9912 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
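   /* Note: MXCSR.MM (AMD's misaligned SSE mode) waives the 16 byte alignment
      requirement, so no \#GP(0) is raised when it is set. */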
9913 if ( (GCPtrMem & 15)
9914 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9915 return iemRaiseGeneralProtectionFault0(pVCpu);
9916
9917 PCRTUINT128U pu128Src;
9918 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9919 if (rc == VINF_SUCCESS)
9920 {
9921 pu128Dst->au64[0] = pu128Src->au64[0];
9922 pu128Dst->au64[1] = pu128Src->au64[1];
9923 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9924 }
9925 return rc;
9926}
9927
9928
9929#ifdef IEM_WITH_SETJMP
9930/**
9931 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9932 * related, longjmp on error.
9933 *
9934 * Raises \#GP(0) if not aligned.
9935 *
9936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9937 * @param pu128Dst Where to return the dqword.
9938 * @param iSegReg The index of the segment register to use for
9939 * this access. The base and limits are checked.
9940 * @param GCPtrMem The address of the guest memory.
9941 */
9942DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9943{
9944 /* The lazy approach for now... */
9945 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9946 if ( (GCPtrMem & 15) == 0
9947 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9948 {
9949 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9950 pu128Dst->au64[0] = pu128Src->au64[0];
9951 pu128Dst->au64[1] = pu128Src->au64[1];
9952 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9953 return;
9954 }
9955
9956 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9957 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9958}
9959#endif
9960
9961
9962/**
9963 * Fetches a data oword (octo word), generally AVX related.
9964 *
9965 * @returns Strict VBox status code.
9966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9967 * @param pu256Dst Where to return the oword.
9968 * @param iSegReg The index of the segment register to use for
9969 * this access. The base and limits are checked.
9970 * @param GCPtrMem The address of the guest memory.
9971 */
9972IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9973{
9974 /* The lazy approach for now... */
9975 PCRTUINT256U pu256Src;
9976 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9977 if (rc == VINF_SUCCESS)
9978 {
9979 pu256Dst->au64[0] = pu256Src->au64[0];
9980 pu256Dst->au64[1] = pu256Src->au64[1];
9981 pu256Dst->au64[2] = pu256Src->au64[2];
9982 pu256Dst->au64[3] = pu256Src->au64[3];
9983 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9984 }
9985 return rc;
9986}
9987
9988
9989#ifdef IEM_WITH_SETJMP
9990/**
9991 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9992 *
9993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9994 * @param pu256Dst Where to return the oword.
9995 * @param iSegReg The index of the segment register to use for
9996 * this access. The base and limits are checked.
9997 * @param GCPtrMem The address of the guest memory.
9998 */
9999IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10000{
10001 /* The lazy approach for now... */
10002 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10003 pu256Dst->au64[0] = pu256Src->au64[0];
10004 pu256Dst->au64[1] = pu256Src->au64[1];
10005 pu256Dst->au64[2] = pu256Src->au64[2];
10006 pu256Dst->au64[3] = pu256Src->au64[3];
10007 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
10008}
10009#endif
10010
10011
10012/**
10013 * Fetches a data oword (octo word) at an aligned address, generally AVX
10014 * related.
10015 *
10016 * Raises \#GP(0) if not aligned.
10017 *
10018 * @returns Strict VBox status code.
10019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10020 * @param pu256Dst Where to return the oword.
10021 * @param iSegReg The index of the segment register to use for
10022 * this access. The base and limits are checked.
10023 * @param GCPtrMem The address of the guest memory.
10024 */
10025IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10026{
10027 /* The lazy approach for now... */
10028 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
10029 if (GCPtrMem & 31)
10030 return iemRaiseGeneralProtectionFault0(pVCpu);
10031
10032 PCRTUINT256U pu256Src;
10033 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10034 if (rc == VINF_SUCCESS)
10035 {
10036 pu256Dst->au64[0] = pu256Src->au64[0];
10037 pu256Dst->au64[1] = pu256Src->au64[1];
10038 pu256Dst->au64[2] = pu256Src->au64[2];
10039 pu256Dst->au64[3] = pu256Src->au64[3];
10040 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
10041 }
10042 return rc;
10043}
10044
10045
10046#ifdef IEM_WITH_SETJMP
10047/**
10048 * Fetches a data oword (octo word) at an aligned address, generally AVX
10049 * related, longjmp on error.
10050 *
10051 * Raises \#GP(0) if not aligned.
10052 *
10053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10054 * @param pu256Dst Where to return the oword.
10055 * @param iSegReg The index of the segment register to use for
10056 * this access. The base and limits are checked.
10057 * @param GCPtrMem The address of the guest memory.
10058 */
10059DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10060{
10061 /* The lazy approach for now... */
10062 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
10063 if ((GCPtrMem & 31) == 0)
10064 {
10065 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10066 pu256Dst->au64[0] = pu256Src->au64[0];
10067 pu256Dst->au64[1] = pu256Src->au64[1];
10068 pu256Dst->au64[2] = pu256Src->au64[2];
10069 pu256Dst->au64[3] = pu256Src->au64[3];
10070 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
10071 return;
10072 }
10073
10074 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10075 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10076}
10077#endif
10078
10079
10080
10081/**
10082 * Fetches a descriptor register (lgdt, lidt).
10083 *
10084 * @returns Strict VBox status code.
10085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10086 * @param pcbLimit Where to return the limit.
10087 * @param pGCPtrBase Where to return the base.
10088 * @param iSegReg The index of the segment register to use for
10089 * this access. The base and limits are checked.
10090 * @param GCPtrMem The address of the guest memory.
10091 * @param enmOpSize The effective operand size.
10092 */
10093IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
10094 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
10095{
10096 /*
10097 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
10098 * little special:
10099 * - The two reads are done separately.
10100 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
10101 * - We suspect the 386 to actually commit the limit before the base in
10102 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
10103 * don't try to emulate this eccentric behavior, because it's not well
10104 * enough understood and rather hard to trigger.
10105 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
10106 */
10107 VBOXSTRICTRC rcStrict;
10108 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
10109 {
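        /* In 64-bit mode the limit is always a word and the base a full qword;
           the operand size override is ignored. */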
10110 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
10111 if (rcStrict == VINF_SUCCESS)
10112 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
10113 }
10114 else
10115 {
10116 uint32_t uTmp = 0; /* (silences a Visual C++ maybe-used-uninitialized warning) */
10117 if (enmOpSize == IEMMODE_32BIT)
10118 {
10119 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
10120 {
10121 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
10122 if (rcStrict == VINF_SUCCESS)
10123 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
10124 }
10125 else
10126 {
10127 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
10128 if (rcStrict == VINF_SUCCESS)
10129 {
10130 *pcbLimit = (uint16_t)uTmp;
10131 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
10132 }
10133 }
10134 if (rcStrict == VINF_SUCCESS)
10135 *pGCPtrBase = uTmp;
10136 }
10137 else
10138 {
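            /* 16-bit operand size: only a 24-bit base is loaded, so the top
               byte of the dword read below is masked off. */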
10139 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
10140 if (rcStrict == VINF_SUCCESS)
10141 {
10142 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
10143 if (rcStrict == VINF_SUCCESS)
10144 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
10145 }
10146 }
10147 }
10148 return rcStrict;
10149}
10150
10151
10152
10153/**
10154 * Stores a data byte.
10155 *
10156 * @returns Strict VBox status code.
10157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10158 * @param iSegReg The index of the segment register to use for
10159 * this access. The base and limits are checked.
10160 * @param GCPtrMem The address of the guest memory.
10161 * @param u8Value The value to store.
10162 */
10163IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
10164{
10165 /* The lazy approach for now... */
10166 uint8_t *pu8Dst;
10167 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10168 if (rc == VINF_SUCCESS)
10169 {
10170 *pu8Dst = u8Value;
10171 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
10172 }
10173 return rc;
10174}
10175
10176
10177#ifdef IEM_WITH_SETJMP
10178/**
10179 * Stores a data byte, longjmp on error.
10180 *
10181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10182 * @param iSegReg The index of the segment register to use for
10183 * this access. The base and limits are checked.
10184 * @param GCPtrMem The address of the guest memory.
10185 * @param u8Value The value to store.
10186 */
10187IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
10188{
10189 /* The lazy approach for now... */
10190 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10191 *pu8Dst = u8Value;
10192 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
10193}
10194#endif
10195
10196
10197/**
10198 * Stores a data word.
10199 *
10200 * @returns Strict VBox status code.
10201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10202 * @param iSegReg The index of the segment register to use for
10203 * this access. The base and limits are checked.
10204 * @param GCPtrMem The address of the guest memory.
10205 * @param u16Value The value to store.
10206 */
10207IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
10208{
10209 /* The lazy approach for now... */
10210 uint16_t *pu16Dst;
10211 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10212 if (rc == VINF_SUCCESS)
10213 {
10214 *pu16Dst = u16Value;
10215 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10216 }
10217 return rc;
10218}
10219
10220
10221#ifdef IEM_WITH_SETJMP
10222/**
10223 * Stores a data word, longjmp on error.
10224 *
10225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10226 * @param iSegReg The index of the segment register to use for
10227 * this access. The base and limits are checked.
10228 * @param GCPtrMem The address of the guest memory.
10229 * @param u16Value The value to store.
10230 */
10231IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
10232{
10233 /* The lazy approach for now... */
10234 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10235 *pu16Dst = u16Value;
10236 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10237}
10238#endif
10239
10240
10241/**
10242 * Stores a data dword.
10243 *
10244 * @returns Strict VBox status code.
10245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10246 * @param iSegReg The index of the segment register to use for
10247 * this access. The base and limits are checked.
10248 * @param GCPtrMem The address of the guest memory.
10249 * @param u32Value The value to store.
10250 */
10251IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10252{
10253 /* The lazy approach for now... */
10254 uint32_t *pu32Dst;
10255 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10256 if (rc == VINF_SUCCESS)
10257 {
10258 *pu32Dst = u32Value;
10259 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10260 }
10261 return rc;
10262}
10263
10264
10265#ifdef IEM_WITH_SETJMP
10266/**
10267 * Stores a data dword, longjmp on error.
10268 *
10270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10271 * @param iSegReg The index of the segment register to use for
10272 * this access. The base and limits are checked.
10273 * @param GCPtrMem The address of the guest memory.
10274 * @param u32Value The value to store.
10275 */
10276IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10277{
10278 /* The lazy approach for now... */
10279 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10280 *pu32Dst = u32Value;
10281 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10282}
10283#endif
10284
10285
10286/**
10287 * Stores a data qword.
10288 *
10289 * @returns Strict VBox status code.
10290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10291 * @param iSegReg The index of the segment register to use for
10292 * this access. The base and limits are checked.
10293 * @param GCPtrMem The address of the guest memory.
10294 * @param u64Value The value to store.
10295 */
10296IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10297{
10298 /* The lazy approach for now... */
10299 uint64_t *pu64Dst;
10300 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10301 if (rc == VINF_SUCCESS)
10302 {
10303 *pu64Dst = u64Value;
10304 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10305 }
10306 return rc;
10307}
10308
10309
10310#ifdef IEM_WITH_SETJMP
10311/**
10312 * Stores a data qword, longjmp on error.
10313 *
10314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10315 * @param iSegReg The index of the segment register to use for
10316 * this access. The base and limits are checked.
10317 * @param GCPtrMem The address of the guest memory.
10318 * @param u64Value The value to store.
10319 */
10320IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10321{
10322 /* The lazy approach for now... */
10323 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10324 *pu64Dst = u64Value;
10325 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10326}
10327#endif
10328
10329
10330/**
10331 * Stores a data dqword.
10332 *
10333 * @returns Strict VBox status code.
10334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10335 * @param iSegReg The index of the segment register to use for
10336 * this access. The base and limits are checked.
10337 * @param GCPtrMem The address of the guest memory.
10338 * @param u128Value The value to store.
10339 */
10340IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10341{
10342 /* The lazy approach for now... */
10343 PRTUINT128U pu128Dst;
10344 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10345 if (rc == VINF_SUCCESS)
10346 {
10347 pu128Dst->au64[0] = u128Value.au64[0];
10348 pu128Dst->au64[1] = u128Value.au64[1];
10349 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10350 }
10351 return rc;
10352}
10353
10354
10355#ifdef IEM_WITH_SETJMP
10356/**
10357 * Stores a data dqword, longjmp on error.
10358 *
10359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10360 * @param iSegReg The index of the segment register to use for
10361 * this access. The base and limits are checked.
10362 * @param GCPtrMem The address of the guest memory.
10363 * @param u128Value The value to store.
10364 */
10365IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10366{
10367 /* The lazy approach for now... */
10368 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10369 pu128Dst->au64[0] = u128Value.au64[0];
10370 pu128Dst->au64[1] = u128Value.au64[1];
10371 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10372}
10373#endif
10374
10375
10376/**
10377 * Stores a data dqword, SSE aligned.
10378 *
10379 * @returns Strict VBox status code.
10380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10381 * @param iSegReg The index of the segment register to use for
10382 * this access. The base and limits are checked.
10383 * @param GCPtrMem The address of the guest memory.
10384 * @param u128Value The value to store.
10385 */
10386IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10387{
10388 /* The lazy approach for now... */
10389 if ( (GCPtrMem & 15)
10390 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10391 return iemRaiseGeneralProtectionFault0(pVCpu);
10392
10393 PRTUINT128U pu128Dst;
10394 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10395 if (rc == VINF_SUCCESS)
10396 {
10397 pu128Dst->au64[0] = u128Value.au64[0];
10398 pu128Dst->au64[1] = u128Value.au64[1];
10399 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10400 }
10401 return rc;
10402}
10403
10404
10405#ifdef IEM_WITH_SETJMP
10406/**
10407 * Stores a data dqword, SSE aligned, longjmp on error.
10408 *
10410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10411 * @param iSegReg The index of the segment register to use for
10412 * this access. The base and limits are checked.
10413 * @param GCPtrMem The address of the guest memory.
10414 * @param u128Value The value to store.
10415 */
10416DECL_NO_INLINE(IEM_STATIC, void)
10417iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10418{
10419 /* The lazy approach for now... */
10420 if ( (GCPtrMem & 15) == 0
10421 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10422 {
10423 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10424 pu128Dst->au64[0] = u128Value.au64[0];
10425 pu128Dst->au64[1] = u128Value.au64[1];
10426 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10427 return;
10428 }
10429
10430 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10431 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10432}
10433#endif
10434
10435
10436/**
10437 * Stores a data oword (octo word), generally AVX related.
10438 *
10439 * @returns Strict VBox status code.
10440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10441 * @param iSegReg The index of the segment register to use for
10442 * this access. The base and limits are checked.
10443 * @param GCPtrMem The address of the guest memory.
10444 * @param pu256Value Pointer to the value to store.
10445 */
10446IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10447{
10448 /* The lazy approach for now... */
10449 PRTUINT256U pu256Dst;
10450 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10451 if (rc == VINF_SUCCESS)
10452 {
10453 pu256Dst->au64[0] = pu256Value->au64[0];
10454 pu256Dst->au64[1] = pu256Value->au64[1];
10455 pu256Dst->au64[2] = pu256Value->au64[2];
10456 pu256Dst->au64[3] = pu256Value->au64[3];
10457 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10458 }
10459 return rc;
10460}
10461
10462
10463#ifdef IEM_WITH_SETJMP
10464/**
10465 * Stores a data oword (octo word), longjmp on error.
10466 *
10467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10468 * @param iSegReg The index of the segment register to use for
10469 * this access. The base and limits are checked.
10470 * @param GCPtrMem The address of the guest memory.
10471 * @param pu256Value Pointer to the value to store.
10472 */
10473IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10474{
10475 /* The lazy approach for now... */
10476 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10477 pu256Dst->au64[0] = pu256Value->au64[0];
10478 pu256Dst->au64[1] = pu256Value->au64[1];
10479 pu256Dst->au64[2] = pu256Value->au64[2];
10480 pu256Dst->au64[3] = pu256Value->au64[3];
10481 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10482}
10483#endif
10484
10485
10486/**
10487 * Stores a data oword (octo word), AVX aligned.
10488 *
10489 * @returns Strict VBox status code.
10490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10491 * @param iSegReg The index of the segment register to use for
10492 * this access. The base and limits are checked.
10493 * @param GCPtrMem The address of the guest memory.
10494 * @param pu256Value Pointer to the value to store.
10495 */
10496IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10497{
10498 /* The lazy approach for now... */
10499 if (GCPtrMem & 31)
10500 return iemRaiseGeneralProtectionFault0(pVCpu);
10501
10502 PRTUINT256U pu256Dst;
10503 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10504 if (rc == VINF_SUCCESS)
10505 {
10506 pu256Dst->au64[0] = pu256Value->au64[0];
10507 pu256Dst->au64[1] = pu256Value->au64[1];
10508 pu256Dst->au64[2] = pu256Value->au64[2];
10509 pu256Dst->au64[3] = pu256Value->au64[3];
10510 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10511 }
10512 return rc;
10513}
10514
10515
10516#ifdef IEM_WITH_SETJMP
10517/**
10518 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10519 *
10521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10522 * @param iSegReg The index of the segment register to use for
10523 * this access. The base and limits are checked.
10524 * @param GCPtrMem The address of the guest memory.
10525 * @param pu256Value Pointer to the value to store.
10526 */
10527DECL_NO_INLINE(IEM_STATIC, void)
10528iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10529{
10530 /* The lazy approach for now... */
10531 if ((GCPtrMem & 31) == 0)
10532 {
10533 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10534 pu256Dst->au64[0] = pu256Value->au64[0];
10535 pu256Dst->au64[1] = pu256Value->au64[1];
10536 pu256Dst->au64[2] = pu256Value->au64[2];
10537 pu256Dst->au64[3] = pu256Value->au64[3];
10538 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10539 return;
10540 }
10541
10542 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10543 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10544}
10545#endif
10546
10547
10548/**
10549 * Stores a descriptor register (sgdt, sidt).
10550 *
10551 * @returns Strict VBox status code.
10552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10553 * @param cbLimit The limit.
10554 * @param GCPtrBase The base address.
10555 * @param iSegReg The index of the segment register to use for
10556 * this access. The base and limits are checked.
10557 * @param GCPtrMem The address of the guest memory.
10558 */
10559IEM_STATIC VBOXSTRICTRC
10560iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10561{
10562 /*
10563 * The SIDT and SGDT instructions actually store the data using two
10564 * independent writes. The instructions do not respond to opsize prefixes.
10565 */
10566 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10567 if (rcStrict == VINF_SUCCESS)
10568 {
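        /* With a 16-bit operand size only a 24-bit base is meaningful; the 286
           target fills the top byte with 0xff, later CPUs store it as-is. */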
10569 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10570 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10571 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10572 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10573 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10574 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10575 else
10576 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10577 }
10578 return rcStrict;
10579}
10580
10581
10582/**
10583 * Pushes a word onto the stack.
10584 *
10585 * @returns Strict VBox status code.
10586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10587 * @param u16Value The value to push.
10588 */
10589IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10590{
10591 /* Decrement the stack pointer. */
10592 uint64_t uNewRsp;
10593 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10594
10595 /* Write the word the lazy way. */
10596 uint16_t *pu16Dst;
10597 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10598 if (rc == VINF_SUCCESS)
10599 {
10600 *pu16Dst = u16Value;
10601 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10602 }
10603
10604 /* Commit the new RSP value unless an access handler made trouble. */
10605 if (rc == VINF_SUCCESS)
10606 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10607
10608 return rc;
10609}
10610
10611
10612/**
10613 * Pushes a dword onto the stack.
10614 *
10615 * @returns Strict VBox status code.
10616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10617 * @param u32Value The value to push.
10618 */
10619IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10620{
10621 /* Decrement the stack pointer. */
10622 uint64_t uNewRsp;
10623 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10624
10625 /* Write the dword the lazy way. */
10626 uint32_t *pu32Dst;
10627 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10628 if (rc == VINF_SUCCESS)
10629 {
10630 *pu32Dst = u32Value;
10631 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10632 }
10633
10634 /* Commit the new RSP value unless an access handler made trouble. */
10635 if (rc == VINF_SUCCESS)
10636 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10637
10638 return rc;
10639}
10640
10641
10642/**
10643 * Pushes a dword segment register value onto the stack.
10644 *
10645 * @returns Strict VBox status code.
10646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10647 * @param u32Value The value to push.
10648 */
10649IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10650{
10651 /* Decrement the stack pointer. */
10652 uint64_t uNewRsp;
10653 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10654
10655 /* The Intel docs talk about zero extending the selector register
10656 value. My actual Intel CPU here might be zero extending the value,
10657 but it still only writes the lower word... */
10658 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10659 * happens when crossing a page boundary: is the high word checked
10660 * for write accessibility or not? Probably it is. What about segment limits?
10661 * It appears this behavior is also shared with trap error codes.
10662 *
10663 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10664 * ancient hardware when it actually did change. */
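   /* Note: a full dword is mapped below so the limit and page checks cover all
      four bytes, but only the low word is actually written (see above). */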
10665 uint16_t *pu16Dst;
10666 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10667 if (rc == VINF_SUCCESS)
10668 {
10669 *pu16Dst = (uint16_t)u32Value;
10670 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10671 }
10672
10673 /* Commit the new RSP value unless an access handler made trouble. */
10674 if (rc == VINF_SUCCESS)
10675 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10676
10677 return rc;
10678}
10679
10680
10681/**
10682 * Pushes a qword onto the stack.
10683 *
10684 * @returns Strict VBox status code.
10685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10686 * @param u64Value The value to push.
10687 */
10688IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10689{
10690 /* Decrement the stack pointer. */
10691 uint64_t uNewRsp;
10692 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10693
10694 /* Write the qword the lazy way. */
10695 uint64_t *pu64Dst;
10696 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10697 if (rc == VINF_SUCCESS)
10698 {
10699 *pu64Dst = u64Value;
10700 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10701 }
10702
10703 /* Commit the new RSP value unless an access handler made trouble. */
10704 if (rc == VINF_SUCCESS)
10705 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10706
10707 return rc;
10708}
10709
10710
10711/**
10712 * Pops a word from the stack.
10713 *
10714 * @returns Strict VBox status code.
10715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10716 * @param pu16Value Where to store the popped value.
10717 */
10718IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10719{
10720 /* Increment the stack pointer. */
10721 uint64_t uNewRsp;
10722 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10723
10724    /* Fetch the word the lazy way. */
10725 uint16_t const *pu16Src;
10726 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10727 if (rc == VINF_SUCCESS)
10728 {
10729 *pu16Value = *pu16Src;
10730 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10731
10732 /* Commit the new RSP value. */
10733 if (rc == VINF_SUCCESS)
10734 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10735 }
10736
10737 return rc;
10738}
10739
10740
10741/**
10742 * Pops a dword from the stack.
10743 *
10744 * @returns Strict VBox status code.
10745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10746 * @param pu32Value Where to store the popped value.
10747 */
10748IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10749{
10750 /* Increment the stack pointer. */
10751 uint64_t uNewRsp;
10752 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10753
10754    /* Fetch the dword the lazy way. */
10755 uint32_t const *pu32Src;
10756 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10757 if (rc == VINF_SUCCESS)
10758 {
10759 *pu32Value = *pu32Src;
10760 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10761
10762 /* Commit the new RSP value. */
10763 if (rc == VINF_SUCCESS)
10764 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10765 }
10766
10767 return rc;
10768}
10769
10770
10771/**
10772 * Pops a qword from the stack.
10773 *
10774 * @returns Strict VBox status code.
10775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10776 * @param pu64Value Where to store the popped value.
10777 */
10778IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
10779{
10780 /* Increment the stack pointer. */
10781 uint64_t uNewRsp;
10782 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10783
10784    /* Fetch the qword the lazy way. */
10785 uint64_t const *pu64Src;
10786 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10787 if (rc == VINF_SUCCESS)
10788 {
10789 *pu64Value = *pu64Src;
10790 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10791
10792 /* Commit the new RSP value. */
10793 if (rc == VINF_SUCCESS)
10794 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10795 }
10796
10797 return rc;
10798}
10799
10800
10801/**
10802 * Pushes a word onto the stack, using a temporary stack pointer.
10803 *
10804 * @returns Strict VBox status code.
10805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10806 * @param u16Value The value to push.
10807 * @param pTmpRsp Pointer to the temporary stack pointer.
10808 */
10809IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10810{
10811    /* Decrement the stack pointer. */
10812 RTUINT64U NewRsp = *pTmpRsp;
10813 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10814
10815 /* Write the word the lazy way. */
10816 uint16_t *pu16Dst;
10817 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10818 if (rc == VINF_SUCCESS)
10819 {
10820 *pu16Dst = u16Value;
10821 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10822 }
10823
10824    /* Commit the new RSP value unless an access handler made trouble. */
10825 if (rc == VINF_SUCCESS)
10826 *pTmpRsp = NewRsp;
10827
10828 return rc;
10829}
10830
10831
10832/**
10833 * Pushes a dword onto the stack, using a temporary stack pointer.
10834 *
10835 * @returns Strict VBox status code.
10836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10837 * @param u32Value The value to push.
10838 * @param pTmpRsp Pointer to the temporary stack pointer.
10839 */
10840IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10841{
10842    /* Decrement the stack pointer. */
10843 RTUINT64U NewRsp = *pTmpRsp;
10844 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10845
10846    /* Write the dword the lazy way. */
10847 uint32_t *pu32Dst;
10848 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10849 if (rc == VINF_SUCCESS)
10850 {
10851 *pu32Dst = u32Value;
10852 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10853 }
10854
10855    /* Commit the new RSP value unless an access handler made trouble. */
10856 if (rc == VINF_SUCCESS)
10857 *pTmpRsp = NewRsp;
10858
10859 return rc;
10860}
10861
10862
10863/**
10864 * Pushes a qword onto the stack, using a temporary stack pointer.
10865 *
10866 * @returns Strict VBox status code.
10867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10868 * @param u64Value The value to push.
10869 * @param pTmpRsp Pointer to the temporary stack pointer.
10870 */
10871IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10872{
10873    /* Decrement the stack pointer. */
10874 RTUINT64U NewRsp = *pTmpRsp;
10875 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10876
10877    /* Write the qword the lazy way. */
10878 uint64_t *pu64Dst;
10879 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10880 if (rc == VINF_SUCCESS)
10881 {
10882 *pu64Dst = u64Value;
10883 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10884 }
10885
10886    /* Commit the new RSP value unless an access handler made trouble. */
10887 if (rc == VINF_SUCCESS)
10888 *pTmpRsp = NewRsp;
10889
10890 return rc;
10891}
10892
10893
10894/**
10895 * Pops a word from the stack, using a temporary stack pointer.
10896 *
10897 * @returns Strict VBox status code.
10898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10899 * @param pu16Value Where to store the popped value.
10900 * @param pTmpRsp Pointer to the temporary stack pointer.
10901 */
10902IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10903{
10904 /* Increment the stack pointer. */
10905 RTUINT64U NewRsp = *pTmpRsp;
10906 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10907
10908    /* Fetch the word the lazy way. */
10909 uint16_t const *pu16Src;
10910 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10911 if (rc == VINF_SUCCESS)
10912 {
10913 *pu16Value = *pu16Src;
10914 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10915
10916 /* Commit the new RSP value. */
10917 if (rc == VINF_SUCCESS)
10918 *pTmpRsp = NewRsp;
10919 }
10920
10921 return rc;
10922}
10923
10924
10925/**
10926 * Pops a dword from the stack, using a temporary stack pointer.
10927 *
10928 * @returns Strict VBox status code.
10929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10930 * @param pu32Value Where to store the popped value.
10931 * @param pTmpRsp Pointer to the temporary stack pointer.
10932 */
10933IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10934{
10935 /* Increment the stack pointer. */
10936 RTUINT64U NewRsp = *pTmpRsp;
10937 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10938
10939    /* Fetch the dword the lazy way. */
10940 uint32_t const *pu32Src;
10941 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10942 if (rc == VINF_SUCCESS)
10943 {
10944 *pu32Value = *pu32Src;
10945 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10946
10947 /* Commit the new RSP value. */
10948 if (rc == VINF_SUCCESS)
10949 *pTmpRsp = NewRsp;
10950 }
10951
10952 return rc;
10953}
10954
10955
10956/**
10957 * Pops a qword from the stack, using a temporary stack pointer.
10958 *
10959 * @returns Strict VBox status code.
10960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10961 * @param pu64Value Where to store the popped value.
10962 * @param pTmpRsp Pointer to the temporary stack pointer.
10963 */
10964IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10965{
10966 /* Increment the stack pointer. */
10967 RTUINT64U NewRsp = *pTmpRsp;
10968 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10969
10970    /* Fetch the qword the lazy way. */
10971 uint64_t const *pu64Src;
10972 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10973 if (rcStrict == VINF_SUCCESS)
10974 {
10975 *pu64Value = *pu64Src;
10976 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10977
10978 /* Commit the new RSP value. */
10979 if (rcStrict == VINF_SUCCESS)
10980 *pTmpRsp = NewRsp;
10981 }
10982
10983 return rcStrict;
10984}
10985
10986
10987/**
10988 * Begin a special stack push (used by interrupts, exceptions and such).
10989 *
10990 * This will raise \#SS or \#PF if appropriate.
10991 *
10992 * @returns Strict VBox status code.
10993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10994 * @param cbMem The number of bytes to push onto the stack.
10995 * @param ppvMem Where to return the pointer to the stack memory.
10996 * As with the other memory functions this could be
10997 * direct access or bounce buffered access, so
10998 *                      don't commit any register state until the commit call
10999 * succeeds.
11000 * @param puNewRsp Where to return the new RSP value. This must be
11001 * passed unchanged to
11002 * iemMemStackPushCommitSpecial().
11003 */
11004IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
11005{
11006 Assert(cbMem < UINT8_MAX);
11007 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
11008 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
11009}
11010
11011
11012/**
11013 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
11014 *
11015 * This will update the rSP.
11016 *
11017 * @returns Strict VBox status code.
11018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11019 * @param pvMem The pointer returned by
11020 * iemMemStackPushBeginSpecial().
11021 * @param uNewRsp The new RSP value returned by
11022 * iemMemStackPushBeginSpecial().
11023 */
11024IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
11025{
11026 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
11027 if (rcStrict == VINF_SUCCESS)
11028 pVCpu->cpum.GstCtx.rsp = uNewRsp;
11029 return rcStrict;
11030}
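/*
 * Usage sketch (illustrative only; the three-dword frame layout and the
 * function name are just assumptions for the example): how an exception
 * pusher might combine the two special push helpers above.
 */
#if 0
static VBOXSTRICTRC iemExamplePushXcptFrame32(PVMCPUCC pVCpu, uint32_t fEfl, uint32_t uCs, uint32_t uEip)
{
    void     *pvFrame;
    uint64_t  uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t), &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint32_t *pau32Frame = (uint32_t *)pvFrame;
    pau32Frame[0] = uEip;   /* lowest address = the new stack top */
    pau32Frame[1] = uCs;
    pau32Frame[2] = fEfl;
    return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp); /* commits RSP on success */
}
#endif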
11031
11032
11033/**
11034 * Begin a special stack pop (used by iret, retf and such).
11035 *
11036 * This will raise \#SS or \#PF if appropriate.
11037 *
11038 * @returns Strict VBox status code.
11039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11040 * @param cbMem The number of bytes to pop from the stack.
11041 * @param ppvMem Where to return the pointer to the stack memory.
11042 * @param puNewRsp Where to return the new RSP value. This must be
11043 * assigned to CPUMCTX::rsp manually some time
11044 * after iemMemStackPopDoneSpecial() has been
11045 * called.
11046 */
11047IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
11048{
11049 Assert(cbMem < UINT8_MAX);
11050 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
11051 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
11052}
11053
11054
11055/**
11056 * Continue a special stack pop (used by iret and retf).
11057 *
11058 * This will raise \#SS or \#PF if appropriate.
11059 *
11060 * @returns Strict VBox status code.
11061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11062 * @param cbMem The number of bytes to pop from the stack.
11063 * @param ppvMem Where to return the pointer to the stack memory.
11064 * @param puNewRsp Where to return the new RSP value. This must be
11065 * assigned to CPUMCTX::rsp manually some time
11066 * after iemMemStackPopDoneSpecial() has been
11067 * called.
11068 */
11069IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
11070{
11071 Assert(cbMem < UINT8_MAX);
11072 RTUINT64U NewRsp;
11073 NewRsp.u = *puNewRsp;
11074 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
11075 *puNewRsp = NewRsp.u;
11076 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
11077}
11078
11079
11080/**
11081 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
11082 * iemMemStackPopContinueSpecial).
11083 *
11084 * The caller will manually commit the rSP.
11085 *
11086 * @returns Strict VBox status code.
11087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11088 * @param pvMem The pointer returned by
11089 * iemMemStackPopBeginSpecial() or
11090 * iemMemStackPopContinueSpecial().
11091 */
11092IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
11093{
11094 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
11095}
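/*
 * Usage sketch (illustrative fragment, not part of this file's call graph):
 * popping two dwords with the special pop helpers above.  Note that the new
 * RSP is only committed by the caller once the popped values have been
 * validated, as the function docs require.
 */
#if 0
    uint32_t const *pau32Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 2 * sizeof(uint32_t), (void const **)&pau32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t const uNewEip = pau32Frame[0];
    uint32_t const uNewCs  = pau32Frame[1];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... validate uNewCs and uNewEip ... */
    pVCpu->cpum.GstCtx.rsp = uNewRsp; /* manual commit */
#endif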
11096
11097
11098/**
11099 * Fetches a system table byte.
11100 *
11101 * @returns Strict VBox status code.
11102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11103 * @param pbDst Where to return the byte.
11104 * @param iSegReg The index of the segment register to use for
11105 * this access. The base and limits are checked.
11106 * @param GCPtrMem The address of the guest memory.
11107 */
11108IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
11109{
11110 /* The lazy approach for now... */
11111 uint8_t const *pbSrc;
11112 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
11113 if (rc == VINF_SUCCESS)
11114 {
11115 *pbDst = *pbSrc;
11116 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
11117 }
11118 return rc;
11119}
11120
11121
11122/**
11123 * Fetches a system table word.
11124 *
11125 * @returns Strict VBox status code.
11126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11127 * @param pu16Dst Where to return the word.
11128 * @param iSegReg The index of the segment register to use for
11129 * this access. The base and limits are checked.
11130 * @param GCPtrMem The address of the guest memory.
11131 */
11132IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
11133{
11134 /* The lazy approach for now... */
11135 uint16_t const *pu16Src;
11136 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
11137 if (rc == VINF_SUCCESS)
11138 {
11139 *pu16Dst = *pu16Src;
11140 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
11141 }
11142 return rc;
11143}
11144
11145
11146/**
11147 * Fetches a system table dword.
11148 *
11149 * @returns Strict VBox status code.
11150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11151 * @param pu32Dst Where to return the dword.
11152 * @param iSegReg The index of the segment register to use for
11153 * this access. The base and limits are checked.
11154 * @param GCPtrMem The address of the guest memory.
11155 */
11156IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
11157{
11158 /* The lazy approach for now... */
11159 uint32_t const *pu32Src;
11160 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
11161 if (rc == VINF_SUCCESS)
11162 {
11163 *pu32Dst = *pu32Src;
11164 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
11165 }
11166 return rc;
11167}
11168
11169
11170/**
11171 * Fetches a system table qword.
11172 *
11173 * @returns Strict VBox status code.
11174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11175 * @param pu64Dst Where to return the qword.
11176 * @param iSegReg The index of the segment register to use for
11177 * this access. The base and limits are checked.
11178 * @param GCPtrMem The address of the guest memory.
11179 */
11180IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
11181{
11182 /* The lazy approach for now... */
11183 uint64_t const *pu64Src;
11184 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
11185 if (rc == VINF_SUCCESS)
11186 {
11187 *pu64Dst = *pu64Src;
11188 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
11189 }
11190 return rc;
11191}
11192
11193
11194/**
11195 * Fetches a descriptor table entry with caller specified error code.
11196 *
11197 * @returns Strict VBox status code.
11198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11199 * @param pDesc Where to return the descriptor table entry.
11200 * @param uSel The selector which table entry to fetch.
11201 * @param uXcpt The exception to raise on table lookup error.
11202 * @param uErrorCode The error code associated with the exception.
11203 */
11204IEM_STATIC VBOXSTRICTRC
11205iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
11206{
11207 AssertPtr(pDesc);
11208 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
11209
11210 /** @todo did the 286 require all 8 bytes to be accessible? */
11211 /*
11212 * Get the selector table base and check bounds.
11213 */
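    /* Note: X86_SEL_RPL_LDT covers the low three selector bits (RPL + TI), so
       OR'ing it into the selector below yields the offset of the last byte of
       the 8-byte descriptor; the '>' comparisons thus check that the whole
       entry lies within the table limit. */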
11214 RTGCPTR GCPtrBase;
11215 if (uSel & X86_SEL_LDT)
11216 {
11217 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
11218 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
11219 {
11220 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
11221 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
11222 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11223 uErrorCode, 0);
11224 }
11225
11226 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
11227 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
11228 }
11229 else
11230 {
11231 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
11232 {
11233 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
11234 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11235 uErrorCode, 0);
11236 }
11237 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
11238 }
11239
11240 /*
11241 * Read the legacy descriptor and maybe the long mode extensions if
11242 * required.
11243 */
11244 VBOXSTRICTRC rcStrict;
11245 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11246 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11247 else
11248 {
11249 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11250 if (rcStrict == VINF_SUCCESS)
11251 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11252 if (rcStrict == VINF_SUCCESS)
11253 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11254 if (rcStrict == VINF_SUCCESS)
11255 pDesc->Legacy.au16[3] = 0;
11256 else
11257 return rcStrict;
11258 }
11259
11260 if (rcStrict == VINF_SUCCESS)
11261 {
11262 if ( !IEM_IS_LONG_MODE(pVCpu)
11263 || pDesc->Legacy.Gen.u1DescType)
11264 pDesc->Long.au64[1] = 0;
11265 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11266 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11267 else
11268 {
11269 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11270 /** @todo is this the right exception? */
11271 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11272 }
11273 }
11274 return rcStrict;
11275}
11276
11277
11278/**
11279 * Fetches a descriptor table entry.
11280 *
11281 * @returns Strict VBox status code.
11282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11283 * @param pDesc Where to return the descriptor table entry.
11284 * @param uSel The selector which table entry to fetch.
11285 * @param uXcpt The exception to raise on table lookup error.
11286 */
11287IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11288{
11289 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11290}
11291
11292
11293/**
11294 * Fakes a long mode stack segment descriptor for SS = 0.
11295 *
11296 * @param pDescSs Where to return the fake stack descriptor.
11297 * @param uDpl The DPL we want.
11298 */
11299IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11300{
11301 pDescSs->Long.au64[0] = 0;
11302 pDescSs->Long.au64[1] = 0;
11303 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11304 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11305 pDescSs->Long.Gen.u2Dpl = uDpl;
11306 pDescSs->Long.Gen.u1Present = 1;
11307 pDescSs->Long.Gen.u1Long = 1;
11308}
11309
11310
11311/**
11312 * Marks the selector descriptor as accessed (only non-system descriptors).
11313 *
11314 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11315 * will therefore skip the limit checks.
11316 *
11317 * @returns Strict VBox status code.
11318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11319 * @param uSel The selector.
11320 */
11321IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
11322{
11323 /*
11324 * Get the selector table base and calculate the entry address.
11325 */
11326 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11327 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11328 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11329 GCPtr += uSel & X86_SEL_MASK;
11330
11331 /*
11332     * ASMAtomicBitSet will assert if the address is misaligned, so do some
11333     * ugly stuff to avoid this.  This makes sure the access is atomic and
11334     * more or less removes any question about 8-bit vs 32-bit accesses.
11335 */
11336 VBOXSTRICTRC rcStrict;
11337 uint32_t volatile *pu32;
11338 if ((GCPtr & 3) == 0)
11339 {
11340        /* The normal case: map the 32 bits containing the accessed bit (bit 40). */
11341 GCPtr += 2 + 2;
11342 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11343 if (rcStrict != VINF_SUCCESS)
11344 return rcStrict;
11345        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11346 }
11347 else
11348 {
11349        /* The misaligned GDT/LDT case: map the whole descriptor. */
11350 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11351 if (rcStrict != VINF_SUCCESS)
11352 return rcStrict;
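        /* Advancing the base pointer by n bytes lowers the bit index by 8*n.
           E.g. if pu32 is 2 bytes past a dword boundary, pu32 + 2 is dword
           aligned and the accessed bit (bit 40 of the descriptor) becomes
           bit 40 - 16 = 24 relative to that base; the other cases follow the
           same arithmetic. */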
11353 switch ((uintptr_t)pu32 & 3)
11354 {
11355 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11356 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11357 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11358 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11359 }
11360 }
11361
11362 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11363}
11364
11365/** @} */
11366
11367
11368/*
11369 * Include the C/C++ implementation of instruction.
11370 */
11371#include "IEMAllCImpl.cpp.h"
11372
11373
11374
11375/** @name "Microcode" macros.
11376 *
11377 * The idea is that we should be able to use the same code to interpret
11378 * instructions as well as recompiler instructions. Thus this obfuscation.
11379 *
11380 * @{
11381 */
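/*
 * Illustrative sketch (not an actual decoder body): roughly what a register
 * to register 16-bit move looks like when written with the microcode macros
 * defined below.  The register indexes iGRegSrc and iGRegDst are assumptions
 * standing in for values the decoder would have extracted from ModR/M.
 */
#if 0
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
    IEM_MC_STORE_GREG_U16(iGRegDst, u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif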
11382#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11383#define IEM_MC_END() }
11384#define IEM_MC_PAUSE() do {} while (0)
11385#define IEM_MC_CONTINUE() do {} while (0)
11386
11387/** Internal macro. */
11388#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11389 do \
11390 { \
11391 VBOXSTRICTRC rcStrict2 = a_Expr; \
11392 if (rcStrict2 != VINF_SUCCESS) \
11393 return rcStrict2; \
11394 } while (0)
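/* The macro above evaluates the expression exactly once and bails out of the
   instruction body with the strict status code on anything other than
   VINF_SUCCESS; this is what lets the memory access macros further down simply
   wrap their iemMem* calls. */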
11395
11396
11397#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11398#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11399#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11400#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11401#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11402#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11403#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11404#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11405#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11406 do { \
11407 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11408 return iemRaiseDeviceNotAvailable(pVCpu); \
11409 } while (0)
11410#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11411 do { \
11412 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11413 return iemRaiseDeviceNotAvailable(pVCpu); \
11414 } while (0)
11415#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11416 do { \
11417 if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
11418 return iemRaiseMathFault(pVCpu); \
11419 } while (0)
11420#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11421 do { \
11422 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11423 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11424 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11425 return iemRaiseUndefinedOpcode(pVCpu); \
11426 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11427 return iemRaiseDeviceNotAvailable(pVCpu); \
11428 } while (0)
11429#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11430 do { \
11431 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11432 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11433 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11434 return iemRaiseUndefinedOpcode(pVCpu); \
11435 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11436 return iemRaiseDeviceNotAvailable(pVCpu); \
11437 } while (0)
11438#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11439 do { \
11440 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11441 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11442 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11443 return iemRaiseUndefinedOpcode(pVCpu); \
11444 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11445 return iemRaiseDeviceNotAvailable(pVCpu); \
11446 } while (0)
11447#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11448 do { \
11449 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11450 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11451 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11452 return iemRaiseUndefinedOpcode(pVCpu); \
11453 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11454 return iemRaiseDeviceNotAvailable(pVCpu); \
11455 } while (0)
11456#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11457 do { \
11458 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11459 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11460 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11461 return iemRaiseUndefinedOpcode(pVCpu); \
11462 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11463 return iemRaiseDeviceNotAvailable(pVCpu); \
11464 } while (0)
11465#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11466 do { \
11467 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11468 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11469 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11470 return iemRaiseUndefinedOpcode(pVCpu); \
11471 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11472 return iemRaiseDeviceNotAvailable(pVCpu); \
11473 } while (0)
11474#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11475 do { \
11476 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11477 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11478 return iemRaiseUndefinedOpcode(pVCpu); \
11479 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11480 return iemRaiseDeviceNotAvailable(pVCpu); \
11481 } while (0)
11482#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11483 do { \
11484 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11485 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11486 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11487 return iemRaiseUndefinedOpcode(pVCpu); \
11488 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11489 return iemRaiseDeviceNotAvailable(pVCpu); \
11490 } while (0)
11491#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11492 do { \
11493 if (pVCpu->iem.s.uCpl != 0) \
11494 return iemRaiseGeneralProtectionFault0(pVCpu); \
11495 } while (0)
11496#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11497 do { \
11498 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11499 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11500 } while (0)
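/* Note: the alignment test above assumes a_cbAlign is a power of two; masking
   with (a_cbAlign - 1), e.g. 15 for a 16 byte access, isolates exactly the low
   address bits that must be zero for an aligned address. */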
11501#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11502 do { \
11503 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11504 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11505 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11506 return iemRaiseUndefinedOpcode(pVCpu); \
11507 } while (0)
11508#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11509 do { \
11510 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11511 return iemRaiseGeneralProtectionFault0(pVCpu); \
11512 } while (0)
11513
11514
11515#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11516#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11517#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11518#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11519#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11520#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11521#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11522 uint32_t a_Name; \
11523 uint32_t *a_pName = &a_Name
11524#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11525 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11526
11527#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11528#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11529
11530#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11531#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11532#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11533#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11534#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11535#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11536#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11537#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11538#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11539#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11540#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11541#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11542#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11543#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11544#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11545#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11546#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11547#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11548 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11549 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11550 } while (0)
11551#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11552 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11553 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11554 } while (0)
11555#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11556 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11557 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11558 } while (0)
11559/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11560#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11561 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11562 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11563 } while (0)
11564#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11565 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11566 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11567 } while (0)
11568/** @note Not for IOPL or IF testing or modification. */
11569#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11570#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11571#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
11572#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
11573
11574#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11575#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11576#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11577#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11578#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11579#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11580#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11581#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11582#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11583#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11584/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11585#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11586 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11587 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11588 } while (0)
11589#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11590 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11591 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11592 } while (0)
11593#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11594 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11595
11596
11597#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11598#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11599/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11600 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11601#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11602#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11603/** @note Not for IOPL or IF testing or modification. */
11604#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11605
11606#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11607#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11608#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11609 do { \
11610 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11611 *pu32Reg += (a_u32Value); \
11612        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11613 } while (0)
11614#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11615
11616#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11617#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11618#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11619 do { \
11620 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11621 *pu32Reg -= (a_u32Value); \
11622        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11623 } while (0)
11624#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11625#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11626
11627#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11628#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11629#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11630#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11631#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11632#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11633#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11634
11635#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11636#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11637#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11638#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11639
11640#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11641#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11642#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11643
11644#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11645#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11646#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11647
11648#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11649#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11650#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11651
11652#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11653#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11654#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11655
11656#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11657
11658#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11659
11660#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11661#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11662#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11663 do { \
11664 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11665 *pu32Reg &= (a_u32Value); \
11666        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11667 } while (0)
11668#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11669
11670#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11671#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11672#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11673 do { \
11674 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11675 *pu32Reg |= (a_u32Value); \
11676        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11677 } while (0)
11678#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11679
11680
11681/** @note Not for IOPL or IF modification. */
11682#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11683/** @note Not for IOPL or IF modification. */
11684#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11685/** @note Not for IOPL or IF modification. */
11686#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11687
11688#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11689
11690/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11691#define IEM_MC_FPU_TO_MMX_MODE() do { \
11692 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
11693 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
11694 } while (0)
11695
11696/** Switches the FPU state from MMX mode (FTW=0xffff). */
11697#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11698 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
11699 } while (0)
11700
11701#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11702 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
11703#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11704 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11705#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11706 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11707 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11708 } while (0)
11709#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11710 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11711 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11712 } while (0)
11713#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11714 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11715#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11716 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11717#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11718 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11719
11720#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11721 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
11722 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
11723 } while (0)
11724#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11725 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11726#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11727 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11728#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11729 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11730#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11731 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11732 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11733 } while (0)
11734#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11735 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11736#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11737 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11738 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11739 } while (0)
11740#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11741 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11742#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11743 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11744 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11745 } while (0)
11746#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11747 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11748#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11749 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11750#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11751 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11752#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11753 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
11754#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11755 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
11756 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
11757 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
11758 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
11759 } while (0)
11760
11761#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11762 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11763 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
11764 } while (0)
11765#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11766 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11767 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11768 } while (0)
11769#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11770 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11771 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11772 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11773 } while (0)
11774#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11775 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11776 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11777 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11778 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11779 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11780 } while (0)
11781
11782#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11783#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11784 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11785 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11786 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11787 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11788 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11789 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11790 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11791 } while (0)
11792#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11793 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11794 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11795 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11796 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11797 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11798 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11799 } while (0)
11800#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11801 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11802 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11803 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11804 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11805 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11806 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11807 } while (0)
11808#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11809 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11810 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11811 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11812 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11813 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11814 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11815 } while (0)
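/* The *_ZX_VLMAX stores above follow the VEX convention that writing an XMM or
   YMM destination zeroes the remaining bits of the register up to VLMAX, hence
   the explicit clearing of the YmmHi halves; IEM_MC_INT_CLEAR_ZMM_256_UP is a
   placeholder for doing the same to bits 511:256 once that state is tracked. */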
11816
11817#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11818 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
11819#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11820 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
11821#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11822 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
11823#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11824 do { uintptr_t const iYRegTmp = (a_iYReg); \
11825 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11826 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11827 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
11828 } while (0)
11829
11830#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11831 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11832 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11833 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11834 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11835 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11836 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11837 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11838 } while (0)
11839#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11840 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11841 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11842 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11843 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11844 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11845 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11846 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11847 } while (0)
11848#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11849 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11850 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11851 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11852 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11853 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11854 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11855 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11856 } while (0)
11857
11858#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11859 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11860 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11861 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11862 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11863 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11864 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11865 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11866 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11867 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11868 } while (0)
11869#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11870 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11871 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11872 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11873 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11874 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11875 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11876 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11877 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11878 } while (0)
11879#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11880 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11881 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11882 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11883 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11884 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11885 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11886 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11887 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11888 } while (0)
11889#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11890 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11891 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11892 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11893 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11894 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11895 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11896 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11897 } while (0)
11898
11899#ifndef IEM_WITH_SETJMP
11900# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11901 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11902# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11904# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11905 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11906#else
11907# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11908 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11909# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11910 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11911# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11912 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11913#endif
11914
11915#ifndef IEM_WITH_SETJMP
11916# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11917 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11918# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11919 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11920# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11921 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11922#else
11923# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11924 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11925# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11926 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11927# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11928 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11929#endif
11930
11931#ifndef IEM_WITH_SETJMP
11932# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11933 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11934# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11935 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11936# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11937 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11938#else
11939# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11940 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11941# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11942 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11943# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11944 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11945#endif
11946
11947#ifdef SOME_UNUSED_FUNCTION
11948# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11949 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11950#endif
11951
11952#ifndef IEM_WITH_SETJMP
11953# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11954 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11955# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11956 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11957# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11958 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11959# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11960 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11961#else
11962# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11963 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11964# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11965 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11966# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11967 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11968# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11969 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11970#endif
11971
11972#ifndef IEM_WITH_SETJMP
11973# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11974 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11975# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11976 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11977# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11978 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11979#else
11980# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11981 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11982# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11983 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11984# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11985 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11986#endif
11987
11988#ifndef IEM_WITH_SETJMP
11989# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11990 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11991# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11992 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11993#else
11994# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11995 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11996# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11997 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11998#endif
11999
12000#ifndef IEM_WITH_SETJMP
12001# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
12002 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
12003# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
12004 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
12005#else
12006# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
12007 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
12008# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
12009 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
12010#endif
12011
12012
12013
12014#ifndef IEM_WITH_SETJMP
12015# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
12016 do { \
12017 uint8_t u8Tmp; \
12018 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12019 (a_u16Dst) = u8Tmp; \
12020 } while (0)
12021# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12022 do { \
12023 uint8_t u8Tmp; \
12024 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12025 (a_u32Dst) = u8Tmp; \
12026 } while (0)
12027# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12028 do { \
12029 uint8_t u8Tmp; \
12030 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12031 (a_u64Dst) = u8Tmp; \
12032 } while (0)
12033# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12034 do { \
12035 uint16_t u16Tmp; \
12036 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
12037 (a_u32Dst) = u16Tmp; \
12038 } while (0)
12039# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12040 do { \
12041 uint16_t u16Tmp; \
12042 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
12043 (a_u64Dst) = u16Tmp; \
12044 } while (0)
12045# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12046 do { \
12047 uint32_t u32Tmp; \
12048 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
12049 (a_u64Dst) = u32Tmp; \
12050 } while (0)
12051#else /* IEM_WITH_SETJMP */
12052# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
12053 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12054# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12055 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12056# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12057 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12058# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12059 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12060# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12061 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12062# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12063 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12064#endif /* IEM_WITH_SETJMP */
12065
12066#ifndef IEM_WITH_SETJMP
12067# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
12068 do { \
12069 uint8_t u8Tmp; \
12070 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12071 (a_u16Dst) = (int8_t)u8Tmp; \
12072 } while (0)
12073# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12074 do { \
12075 uint8_t u8Tmp; \
12076 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12077 (a_u32Dst) = (int8_t)u8Tmp; \
12078 } while (0)
12079# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12080 do { \
12081 uint8_t u8Tmp; \
12082 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12083 (a_u64Dst) = (int8_t)u8Tmp; \
12084 } while (0)
12085# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12086 do { \
12087 uint16_t u16Tmp; \
12088 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
12089 (a_u32Dst) = (int16_t)u16Tmp; \
12090 } while (0)
12091# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12092 do { \
12093 uint16_t u16Tmp; \
12094 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
12095 (a_u64Dst) = (int16_t)u16Tmp; \
12096 } while (0)
12097# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12098 do { \
12099 uint32_t u32Tmp; \
12100 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
12101 (a_u64Dst) = (int32_t)u32Tmp; \
12102 } while (0)
12103#else /* IEM_WITH_SETJMP */
12104# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
12105 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12106# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12107 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12108# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12109 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12110# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12111 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12112# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12113 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12114# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12115 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12116#endif /* IEM_WITH_SETJMP */
12117
12118#ifndef IEM_WITH_SETJMP
12119# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
12120 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
12121# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
12122 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
12123# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
12124 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
12125# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
12126 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
12127#else
12128# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
12129 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
12130# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
12131 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
12132# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
12133 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
12134# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
12135 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
12136#endif
12137
12138#ifndef IEM_WITH_SETJMP
12139# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
12140 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
12141# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
12142 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
12143# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
12144 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
12145# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
12146 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
12147#else
12148# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
12149 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
12150# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
12151 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
12152# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
12153 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
12154# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
12155 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
12156#endif
12157
12158#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
12159#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
12160#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
12161#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
12162#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
12163#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
12164#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
12165 do { \
12166 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
12167 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
12168 } while (0)
12169#define IEM_MC_STORE_MEM_INDEF_D80_BY_REF(a_pd80Dst) \
12170 do { \
12171 (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
12172 (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
12173 } while (0)
12174
12175#ifndef IEM_WITH_SETJMP
12176# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
12177 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
12178# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
12179 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
12180#else
12181# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
12182 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
12183# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
12184 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
12185#endif
12186
12187#ifndef IEM_WITH_SETJMP
12188# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
12189 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
12190# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
12191 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
12192#else
12193# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
12194 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
12195# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
12196 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
12197#endif
12198
12199
12200#define IEM_MC_PUSH_U16(a_u16Value) \
12201 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
12202#define IEM_MC_PUSH_U32(a_u32Value) \
12203 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
12204#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
12205 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
12206#define IEM_MC_PUSH_U64(a_u64Value) \
12207 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
12208
12209#define IEM_MC_POP_U16(a_pu16Value) \
12210 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
12211#define IEM_MC_POP_U32(a_pu32Value) \
12212 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
12213#define IEM_MC_POP_U64(a_pu64Value) \
12214 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
12215
12216/** Maps guest memory for direct or bounce buffered access.
12217 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12218 * @remarks May return.
12219 */
12220#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
12221 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12222
12223/** Maps guest memory for direct or bounce buffered access.
12224 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12225 * @remarks May return.
12226 */
12227#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12228 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12229
12230/** Commits the memory and unmaps the guest memory.
12231 * @remarks May return.
12232 */
12233#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12234 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12235
12236/** Commits the memory and unmaps the guest memory unless the FPU status word
12237 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
12238 * would cause FLD not to store.
12239 *
12240 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12241 * store, while \#P will not.
12242 *
12243 * @remarks May in theory return - for now.
12244 */
12245#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12246 do { \
12247 if ( !(a_u16FSW & X86_FSW_ES) \
12248 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12249 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
12250 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12251 } while (0)
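
/*
 * Informal illustration of the predicate above, derived from the macro body:
 * the commit is skipped only when FSW.ES is set in @a a_u16FSW and at least
 * one of the IE/OE/UE flags is unmasked in FCW (e.g. FSW.ES+FSW.IE with
 * FCW.IM clear).  A pending but masked exception, or a lone precision flag
 * (FSW.PE), still commits, matching the \#P remark in the comment above.
 */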
12252
12253/** Calculate efficient address from R/M. */
12254#ifndef IEM_WITH_SETJMP
12255# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12256 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12257#else
12258# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12259 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12260#endif
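
/*
 * Illustrative sketch (not built): how a decoder body typically combines
 * IEM_MC_CALC_RM_EFF_ADDR with the IEM_MC_FETCH_MEM_* wrappers above for a
 * 16-bit "load register from memory" form.  IEM_MC_BEGIN/IEM_MC_END and the
 * general register helpers are defined earlier in this file; which exact
 * instruction this would belong to is deliberately left unspecified.
 */
#if 0 /* example only */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Value);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);   /* decode the memory operand */
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
        IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg, u16Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif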
12261
12262#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12263#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12264#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12265#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12266#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12267#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12268#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
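
/*
 * Illustrative sketch (not built): a read-modify-write memory operand wired
 * up with IEM_MC_MEM_MAP, one of the IEM_MC_CALL_VOID_AIMPL_* wrappers above
 * and IEM_MC_MEM_COMMIT_AND_UNMAP.  The worker name iemAImpl_add_u16 follows
 * the usual iemAImpl_* naming but should be read as a placeholder for
 * whichever binary-op worker the instruction actually uses.
 */
#if 0 /* example only */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint16_t *,        pu16Dst,          0);
        IEM_MC_ARG(uint16_t,          u16Src,           1);
        IEM_MC_ARG_LOCAL_EFLAGS(      pEFlags, EFlags,  2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DONE_DECODING();
        IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW); /* commits the store (may return) */
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif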
12269
12270/**
12271 * Defers the rest of the instruction emulation to a C implementation routine
12272 * and returns, only taking the standard parameters.
12273 *
12274 * @param a_pfnCImpl The pointer to the C routine.
12275 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12276 */
12277#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12278
12279/**
12280 * Defers the rest of the instruction emulation to a C implementation routine and
12281 * returns, taking one argument in addition to the standard ones.
12282 *
12283 * @param a_pfnCImpl The pointer to the C routine.
12284 * @param a0 The argument.
12285 */
12286#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12287
12288/**
12289 * Defers the rest of the instruction emulation to a C implementation routine
12290 * and returns, taking two arguments in addition to the standard ones.
12291 *
12292 * @param a_pfnCImpl The pointer to the C routine.
12293 * @param a0 The first extra argument.
12294 * @param a1 The second extra argument.
12295 */
12296#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12297
12298/**
12299 * Defers the rest of the instruction emulation to a C implementation routine
12300 * and returns, taking three arguments in addition to the standard ones.
12301 *
12302 * @param a_pfnCImpl The pointer to the C routine.
12303 * @param a0 The first extra argument.
12304 * @param a1 The second extra argument.
12305 * @param a2 The third extra argument.
12306 */
12307#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12308
12309/**
12310 * Defers the rest of the instruction emulation to a C implementation routine
12311 * and returns, taking four arguments in addition to the standard ones.
12312 *
12313 * @param a_pfnCImpl The pointer to the C routine.
12314 * @param a0 The first extra argument.
12315 * @param a1 The second extra argument.
12316 * @param a2 The third extra argument.
12317 * @param a3 The fourth extra argument.
12318 */
12319#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12320
12321/**
12322 * Defers the rest of the instruction emulation to a C implementation routine
12323 * and returns, taking five arguments in addition to the standard ones.
12324 *
12325 * @param a_pfnCImpl The pointer to the C routine.
12326 * @param a0 The first extra argument.
12327 * @param a1 The second extra argument.
12328 * @param a2 The third extra argument.
12329 * @param a3 The fourth extra argument.
12330 * @param a4 The fifth extra argument.
12331 */
12332#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12333
12334/**
12335 * Defers the entire instruction emulation to a C implementation routine and
12336 * returns, only taking the standard parameters.
12337 *
12338 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12339 *
12340 * @param a_pfnCImpl The pointer to the C routine.
12341 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12342 */
12343#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12344
12345/**
12346 * Defers the entire instruction emulation to a C implementation routine and
12347 * returns, taking one argument in addition to the standard ones.
12348 *
12349 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12350 *
12351 * @param a_pfnCImpl The pointer to the C routine.
12352 * @param a0 The argument.
12353 */
12354#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12355
12356/**
12357 * Defers the entire instruction emulation to a C implementation routine and
12358 * returns, taking two arguments in addition to the standard ones.
12359 *
12360 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12361 *
12362 * @param a_pfnCImpl The pointer to the C routine.
12363 * @param a0 The first extra argument.
12364 * @param a1 The second extra argument.
12365 */
12366#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12367
12368/**
12369 * Defers the entire instruction emulation to a C implementation routine and
12370 * returns, taking three arguments in addition to the standard ones.
12371 *
12372 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12373 *
12374 * @param a_pfnCImpl The pointer to the C routine.
12375 * @param a0 The first extra argument.
12376 * @param a1 The second extra argument.
12377 * @param a2 The third extra argument.
12378 */
12379#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
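
/*
 * Illustrative sketch (not built): the difference between deferring a whole
 * instruction to a C implementation and tail-calling one from inside an MC
 * block.  Note that IEM_MC_CALL_CIMPL_N already contains the 'return' while
 * IEM_MC_DEFER_TO_CIMPL_N does not.  The iemOp_/iemCImpl_ names below are
 * placeholders, not real workers.
 */
#if 0 /* example only */
/* Whole-instruction deferral; no IEM_MC_BEGIN/IEM_MC_END around it: */
FNIEMOP_DEF(iemOp_SomeInstruction)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeWorker);
}

/* Handing over after operand decoding, from inside an MC block: */
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG(uint8_t, iEffSeg,     0);
        IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
        IEM_MC_CALL_CIMPL_2(iemCImpl_SomeMemWorker, iEffSeg, GCPtrEffSrc);
        IEM_MC_END();
#endif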
12380
12381/**
12382 * Calls a FPU assembly implementation taking one visible argument.
12383 *
12384 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12385 * @param a0 The first extra argument.
12386 */
12387#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12388 do { \
12389 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
12390 } while (0)
12391
12392/**
12393 * Calls a FPU assembly implementation taking two visible arguments.
12394 *
12395 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12396 * @param a0 The first extra argument.
12397 * @param a1 The second extra argument.
12398 */
12399#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12400 do { \
12401 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12402 } while (0)
12403
12404/**
12405 * Calls a FPU assembly implementation taking three visible arguments.
12406 *
12407 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12408 * @param a0 The first extra argument.
12409 * @param a1 The second extra argument.
12410 * @param a2 The third extra argument.
12411 */
12412#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12413 do { \
12414 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12415 } while (0)
12416
12417#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12418 do { \
12419 (a_FpuData).FSW = (a_FSW); \
12420 (a_FpuData).r80Result = *(a_pr80Value); \
12421 } while (0)
12422
12423/** Pushes FPU result onto the stack. */
12424#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12425 iemFpuPushResult(pVCpu, &a_FpuData)
12426/** Pushes FPU result onto the stack and sets the FPUDP. */
12427#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12428 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12429
12430/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12431#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12432 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12433
12434/** Stores FPU result in a stack register. */
12435#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12436 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12437/** Stores FPU result in a stack register and pops the stack. */
12438#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12439 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12440/** Stores FPU result in a stack register and sets the FPUDP. */
12441#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12442 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12443/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12444 * stack. */
12445#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12446 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12447
12448/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12449#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12450 iemFpuUpdateOpcodeAndIp(pVCpu)
12451/** Free a stack register (for FFREE and FFREEP). */
12452#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12453 iemFpuStackFree(pVCpu, a_iStReg)
12454/** Increment the FPU stack pointer. */
12455#define IEM_MC_FPU_STACK_INC_TOP() \
12456 iemFpuStackIncTop(pVCpu)
12457/** Decrement the FPU stack pointer. */
12458#define IEM_MC_FPU_STACK_DEC_TOP() \
12459 iemFpuStackDecTop(pVCpu)
12460
12461/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12462#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12463 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12464/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12465#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12466 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12467/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12468#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12469 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12470/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12471#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12472 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12473/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12474 * stack. */
12475#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12476 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12477/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12478#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12479 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12480
12481/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12482#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12483 iemFpuStackUnderflow(pVCpu, a_iStDst)
12484/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12485 * stack. */
12486#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12487 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12488/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12489 * FPUDS. */
12490#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12491 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12492/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12493 * FPUDS. Pops stack. */
12494#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12495 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12496/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12497 * stack twice. */
12498#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12499 iemFpuStackUnderflowThenPopPop(pVCpu)
12500/** Raises a FPU stack underflow exception for an instruction pushing a result
12501 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12502#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12503 iemFpuStackPushUnderflow(pVCpu)
12504/** Raises a FPU stack underflow exception for an instruction pushing a result
12505 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12506#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12507 iemFpuStackPushUnderflowTwo(pVCpu)
12508
12509/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12510 * FPUIP, FPUCS and FOP. */
12511#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12512 iemFpuStackPushOverflow(pVCpu)
12513/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12514 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12515#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12516 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12517/** Prepares for using the FPU state.
12518 * Ensures that we can use the host FPU in the current context (RC+R0).
12519 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12520#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12521/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12522#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12523/** Actualizes the guest FPU state so it can be accessed and modified. */
12524#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12525
12526/** Prepares for using the SSE state.
12527 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12528 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12529#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12530/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12531#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12532/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12533#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12534
12535/** Prepares for using the AVX state.
12536 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12537 * Ensures the guest AVX state in the CPUMCTX is up to date.
12538 * @note This will include the AVX512 state too when support for it is added,
12539 * due to the zero-extending feature of VEX instructions. */
12540#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12541/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12542#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12543/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12544#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12545
12546/**
12547 * Calls a MMX assembly implementation taking two visible arguments.
12548 *
12549 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12550 * @param a0 The first extra argument.
12551 * @param a1 The second extra argument.
12552 */
12553#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12554 do { \
12555 IEM_MC_PREPARE_FPU_USAGE(); \
12556 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12557 } while (0)
12558
12559/**
12560 * Calls a MMX assembly implementation taking three visible arguments.
12561 *
12562 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12563 * @param a0 The first extra argument.
12564 * @param a1 The second extra argument.
12565 * @param a2 The third extra argument.
12566 */
12567#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12568 do { \
12569 IEM_MC_PREPARE_FPU_USAGE(); \
12570 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12571 } while (0)
12572
12573
12574/**
12575 * Calls a SSE assembly implementation taking two visible arguments.
12576 *
12577 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12578 * @param a0 The first extra argument.
12579 * @param a1 The second extra argument.
12580 */
12581#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12582 do { \
12583 IEM_MC_PREPARE_SSE_USAGE(); \
12584 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12585 } while (0)
12586
12587/**
12588 * Calls a SSE assembly implementation taking three visible arguments.
12589 *
12590 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12591 * @param a0 The first extra argument.
12592 * @param a1 The second extra argument.
12593 * @param a2 The third extra argument.
12594 */
12595#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12596 do { \
12597 IEM_MC_PREPARE_SSE_USAGE(); \
12598 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12599 } while (0)
12600
12601
12602/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12603 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12604#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12605 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
12606
12607/**
12608 * Calls an AVX assembly implementation taking two visible arguments.
12609 *
12610 * There is one implicit zeroth argument, a pointer to the extended state.
12611 *
12612 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12613 * @param a1 The first extra argument.
12614 * @param a2 The second extra argument.
12615 */
12616#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12617 do { \
12618 IEM_MC_PREPARE_AVX_USAGE(); \
12619 a_pfnAImpl(pXState, (a1), (a2)); \
12620 } while (0)
12621
12622/**
12623 * Calls an AVX assembly implementation taking three visible arguments.
12624 *
12625 * There is one implicit zeroth argument, a pointer to the extended state.
12626 *
12627 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12628 * @param a1 The first extra argument.
12629 * @param a2 The second extra argument.
12630 * @param a3 The third extra argument.
12631 */
12632#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12633 do { \
12634 IEM_MC_PREPARE_AVX_USAGE(); \
12635 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12636 } while (0)
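
/*
 * Illustrative sketch (not built): a register-only VEX.256 form wired up via
 * IEM_MC_IMPLICIT_AVX_AIMPL_ARGS and IEM_MC_CALL_AVX_AIMPL_2 (which already
 * does IEM_MC_PREPARE_AVX_USAGE itself).  Exception checks and REX/VEX
 * register-bit handling are omitted; the worker name is a placeholder
 * following the iemAImpl_* convention.
 */
#if 0 /* example only */
        IEM_MC_BEGIN(3, 0);
        IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();                                           /* arg 0: pXState */
        IEM_MC_ARG_CONST(uint8_t, iYRegDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK, 1);
        IEM_MC_ARG_CONST(uint8_t, iYRegSrc, bRm & X86_MODRM_RM_MASK,                2);
        IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_SomeAvxWorker_u256, iYRegDst, iYRegSrc);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif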
12637
12638/** @note Not for IOPL or IF testing. */
12639#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12640/** @note Not for IOPL or IF testing. */
12641#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12642/** @note Not for IOPL or IF testing. */
12643#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12644/** @note Not for IOPL or IF testing. */
12645#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12646/** @note Not for IOPL or IF testing. */
12647#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12648 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12649 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12650/** @note Not for IOPL or IF testing. */
12651#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12652 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12653 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12654/** @note Not for IOPL or IF testing. */
12655#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12656 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12657 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12658 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12659/** @note Not for IOPL or IF testing. */
12660#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12661 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12662 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12663 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12664#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12665#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12666#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12667/** @note Not for IOPL or IF testing. */
12668#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12669 if ( pVCpu->cpum.GstCtx.cx != 0 \
12670 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12671/** @note Not for IOPL or IF testing. */
12672#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12673 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12674 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12675/** @note Not for IOPL or IF testing. */
12676#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12677 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12678 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12679/** @note Not for IOPL or IF testing. */
12680#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12681 if ( pVCpu->cpum.GstCtx.cx != 0 \
12682 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12683/** @note Not for IOPL or IF testing. */
12684#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12685 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12686 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12687/** @note Not for IOPL or IF testing. */
12688#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12689 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12690 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12691#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12692#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12693
12694#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12695 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12696#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12697 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12698#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12699 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12700#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12701 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12702#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12703 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12704#define IEM_MC_IF_FCW_IM() \
12705 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
12706
12707#define IEM_MC_ELSE() } else {
12708#define IEM_MC_ENDIF() } do {} while (0)
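
/*
 * Illustrative sketch (not built): a typical ST(0),ST(i) arithmetic body
 * combining the IF/ELSE/ENDIF blocks above with the FPU call and result
 * macros from earlier in this group.  Patterned on how such instructions are
 * usually implemented in the instruction tables; the worker name should be
 * read as a placeholder.
 */
#if 0 /* example only */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
        IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
        IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
        IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
        IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
        IEM_MC_MAYBE_RAISE_FPU_XCPT();
        IEM_MC_PREPARE_FPU_USAGE();
        IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
            IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
            IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
        IEM_MC_ELSE()
            IEM_MC_FPU_STACK_UNDERFLOW(0);
        IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif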
12709
12710/** @} */
12711
12712
12713/** @name Opcode Debug Helpers.
12714 * @{
12715 */
12716#ifdef VBOX_WITH_STATISTICS
12717# ifdef IN_RING3
12718# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsR3.a_Stats += 1; } while (0)
12719# else
12720# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsRZ.a_Stats += 1; } while (0)
12721# endif
12722#else
12723# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12724#endif
12725
12726#ifdef DEBUG
12727# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12728 do { \
12729 IEMOP_INC_STATS(a_Stats); \
12730 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12731 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12732 } while (0)
12733
12734# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12735 do { \
12736 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12737 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12738 (void)RT_CONCAT(OP_,a_Upper); \
12739 (void)(a_fDisHints); \
12740 (void)(a_fIemHints); \
12741 } while (0)
12742
12743# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12744 do { \
12745 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12746 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12747 (void)RT_CONCAT(OP_,a_Upper); \
12748 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12749 (void)(a_fDisHints); \
12750 (void)(a_fIemHints); \
12751 } while (0)
12752
12753# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12754 do { \
12755 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12756 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12757 (void)RT_CONCAT(OP_,a_Upper); \
12758 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12759 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12760 (void)(a_fDisHints); \
12761 (void)(a_fIemHints); \
12762 } while (0)
12763
12764# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12765 do { \
12766 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12767 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12768 (void)RT_CONCAT(OP_,a_Upper); \
12769 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12770 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12771 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12772 (void)(a_fDisHints); \
12773 (void)(a_fIemHints); \
12774 } while (0)
12775
12776# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12777 do { \
12778 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12779 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12780 (void)RT_CONCAT(OP_,a_Upper); \
12781 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12782 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12783 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12784 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12785 (void)(a_fDisHints); \
12786 (void)(a_fIemHints); \
12787 } while (0)
12788
12789#else
12790# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12791
12792# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12793 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12794# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12795 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12796# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12797 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12798# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12799 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12800# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12801 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12802
12803#endif
12804
12805#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12806 IEMOP_MNEMONIC0EX(a_Lower, \
12807 #a_Lower, \
12808 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12809#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12810 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12811 #a_Lower " " #a_Op1, \
12812 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12813#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12814 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12815 #a_Lower " " #a_Op1 "," #a_Op2, \
12816 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12817#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12818 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12819 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12820 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12821#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12822 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12823 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12824 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
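
/*
 * Illustrative sketch (not built): a typical invocation, patterned on the SSE
 * move instructions.  The statistics member becomes movaps_Vps_Wps and, in
 * DEBUG builds, the Log4 line above prints the decoded mnemonic; the exact
 * form/hint constants any given instruction passes may differ.
 */
#if 0 /* example only */
    IEMOP_MNEMONIC2(RM, MOVAPS, movaps, Vps, Wps, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES);
#endif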
12825
12826/** @} */
12827
12828
12829/** @name Opcode Helpers.
12830 * @{
12831 */
12832
12833#ifdef IN_RING3
12834# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12835 do { \
12836 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12837 else \
12838 { \
12839 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12840 return IEMOP_RAISE_INVALID_OPCODE(); \
12841 } \
12842 } while (0)
12843#else
12844# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12845 do { \
12846 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12847 else return IEMOP_RAISE_INVALID_OPCODE(); \
12848 } while (0)
12849#endif
12850
12851/** The instruction requires a 186 or later. */
12852#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12853# define IEMOP_HLP_MIN_186() do { } while (0)
12854#else
12855# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12856#endif
12857
12858/** The instruction requires a 286 or later. */
12859#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12860# define IEMOP_HLP_MIN_286() do { } while (0)
12861#else
12862# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12863#endif
12864
12865/** The instruction requires a 386 or later. */
12866#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12867# define IEMOP_HLP_MIN_386() do { } while (0)
12868#else
12869# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12870#endif
12871
12872/** The instruction requires a 386 or later if the given expression is true. */
12873#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12874# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12875#else
12876# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12877#endif
12878
12879/** The instruction requires a 486 or later. */
12880#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12881# define IEMOP_HLP_MIN_486() do { } while (0)
12882#else
12883# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12884#endif
12885
12886/** The instruction requires a Pentium (586) or later. */
12887#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12888# define IEMOP_HLP_MIN_586() do { } while (0)
12889#else
12890# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12891#endif
12892
12893/** The instruction requires a PentiumPro (686) or later. */
12894#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12895# define IEMOP_HLP_MIN_686() do { } while (0)
12896#else
12897# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12898#endif
12899
12900
12901/** The instruction raises an \#UD in real and V8086 mode. */
12902#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12903 do \
12904 { \
12905 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12906 else return IEMOP_RAISE_INVALID_OPCODE(); \
12907 } while (0)
12908
12909#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12910/** This instruction raises an \#UD in real and V8086 mode, or, when in long
12911 * mode, when not using a 64-bit code segment (applicable to all VMX
12912 * instructions except VMCALL).
12913 */
12914#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12915 do \
12916 { \
12917 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12918 && ( !IEM_IS_LONG_MODE(pVCpu) \
12919 || IEM_IS_64BIT_CODE(pVCpu))) \
12920 { /* likely */ } \
12921 else \
12922 { \
12923 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12924 { \
12925 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12926 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12927 return IEMOP_RAISE_INVALID_OPCODE(); \
12928 } \
12929 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12930 { \
12931 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12932 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12933 return IEMOP_RAISE_INVALID_OPCODE(); \
12934 } \
12935 } \
12936 } while (0)
12937
12938/** The instruction can only be executed in VMX operation (VMX root mode and
12939 * non-root mode).
12940 *
12941 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12942 */
12943# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12944 do \
12945 { \
12946 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12947 else \
12948 { \
12949 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12950 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12951 return IEMOP_RAISE_INVALID_OPCODE(); \
12952 } \
12953 } while (0)
12954#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12955
12956/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12957 * 64-bit mode. */
12958#define IEMOP_HLP_NO_64BIT() \
12959 do \
12960 { \
12961 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12962 return IEMOP_RAISE_INVALID_OPCODE(); \
12963 } while (0)
12964
12965/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12966 * 64-bit mode. */
12967#define IEMOP_HLP_ONLY_64BIT() \
12968 do \
12969 { \
12970 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12971 return IEMOP_RAISE_INVALID_OPCODE(); \
12972 } while (0)
12973
12974/** The instruction defaults to 64-bit operand size in 64-bit mode. */
12975#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12976 do \
12977 { \
12978 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12979 iemRecalEffOpSize64Default(pVCpu); \
12980 } while (0)
12981
12982/** The instruction has 64-bit operand size in 64-bit mode. */
12983#define IEMOP_HLP_64BIT_OP_SIZE() \
12984 do \
12985 { \
12986 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12987 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12988 } while (0)
12989
12990/** Only a REX prefix immediately preceding the first opcode byte takes
12991 * effect. This macro helps ensure this, as well as logging bad guest code. */
12992#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12993 do \
12994 { \
12995 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12996 { \
12997 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12998 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12999 pVCpu->iem.s.uRexB = 0; \
13000 pVCpu->iem.s.uRexIndex = 0; \
13001 pVCpu->iem.s.uRexReg = 0; \
13002 iemRecalEffOpSize(pVCpu); \
13003 } \
13004 } while (0)
13005
13006/**
13007 * Done decoding.
13008 */
13009#define IEMOP_HLP_DONE_DECODING() \
13010 do \
13011 { \
13012 /*nothing for now, maybe later... */ \
13013 } while (0)
13014
13015/**
13016 * Done decoding, raise \#UD exception if lock prefix present.
13017 */
13018#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
13019 do \
13020 { \
13021 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
13022 { /* likely */ } \
13023 else \
13024 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13025 } while (0)
13026
13027
13028/**
13029 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
13030 * repnz or size prefixes are present, or if in real or v8086 mode.
13031 */
13032#define IEMOP_HLP_DONE_VEX_DECODING() \
13033 do \
13034 { \
13035 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
13036 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
13037 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
13038 { /* likely */ } \
13039 else \
13040 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13041 } while (0)
13042
13043/**
13044 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
13045 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
13046 */
13047#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
13048 do \
13049 { \
13050 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
13051 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
13052 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
13053 && pVCpu->iem.s.uVexLength == 0)) \
13054 { /* likely */ } \
13055 else \
13056 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13057 } while (0)
13058
13059
13060/**
13061 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
13062 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
13063 * register 0, or if in real or v8086 mode.
13064 */
13065#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
13066 do \
13067 { \
13068 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
13069 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
13070 && !pVCpu->iem.s.uVex3rdReg \
13071 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
13072 { /* likely */ } \
13073 else \
13074 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13075 } while (0)
13076
13077/**
13078 * Done decoding VEX, no V, L=0.
13079 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
13080 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
13081 */
13082#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
13083 do \
13084 { \
13085 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
13086 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
13087 && pVCpu->iem.s.uVexLength == 0 \
13088 && pVCpu->iem.s.uVex3rdReg == 0 \
13089 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
13090 { /* likely */ } \
13091 else \
13092 return IEMOP_RAISE_INVALID_OPCODE(); \
13093 } while (0)
13094
13095#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
13096 do \
13097 { \
13098 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
13099 { /* likely */ } \
13100 else \
13101 { \
13102 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
13103 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13104 } \
13105 } while (0)
13106#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
13107 do \
13108 { \
13109 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
13110 { /* likely */ } \
13111 else \
13112 { \
13113 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
13114 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13115 } \
13116 } while (0)
13117
13118/**
13119 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
13120 * are present.
13121 */
13122#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
13123 do \
13124 { \
13125 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
13126 { /* likely */ } \
13127 else \
13128 return IEMOP_RAISE_INVALID_OPCODE(); \
13129 } while (0)
13130
13131/**
13132 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
13133 * prefixes are present.
13134 */
13135#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
13136 do \
13137 { \
13138 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
13139 { /* likely */ } \
13140 else \
13141 return IEMOP_RAISE_INVALID_OPCODE(); \
13142 } while (0)
13143
13144
13145/**
13146 * Calculates the effective address of a ModR/M memory operand.
13147 *
13148 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13149 *
13150 * @return Strict VBox status code.
13151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13152 * @param bRm The ModRM byte.
13153 * @param cbImm The size of any immediate following the
13154 * effective address opcode bytes. Important for
13155 * RIP relative addressing.
13156 * @param pGCPtrEff Where to return the effective address.
13157 */
13158IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
13159{
13160 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
13161# define SET_SS_DEF() \
13162 do \
13163 { \
13164 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13165 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13166 } while (0)
13167
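/* ModR/M recap: bits 7:6 = mod, bits 5:3 = reg/opcode, bits 2:0 = r/m.  The
   16-bit and 32-bit forms are handled first (non-64-bit mode), followed by
   the 64-bit form with REX extensions and RIP-relative addressing. */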
13168 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13169 {
13170/** @todo Check the effective address size crap! */
13171 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13172 {
13173 uint16_t u16EffAddr;
13174
13175 /* Handle the disp16 form with no registers first. */
13176 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13177 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13178 else
13179 {
13180 /* Get the displacement. */
13181 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13182 {
13183 case 0: u16EffAddr = 0; break;
13184 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13185 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13186 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13187 }
13188
13189 /* Add the base and index registers to the disp. */
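/* 16-bit r/m combinations: 0=[BX+SI] 1=[BX+DI] 2=[BP+SI] 3=[BP+DI]
   4=[SI] 5=[DI] 6=[BP] 7=[BX]; the BP-based forms default to the SS segment. */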
13190 switch (bRm & X86_MODRM_RM_MASK)
13191 {
13192 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13193 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13194 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13195 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13196 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13197 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13198 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13199 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13200 }
13201 }
13202
13203 *pGCPtrEff = u16EffAddr;
13204 }
13205 else
13206 {
13207 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13208 uint32_t u32EffAddr;
13209
13210 /* Handle the disp32 form with no registers first. */
13211 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13212 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13213 else
13214 {
13215 /* Get the register (or SIB) value. */
13216 switch ((bRm & X86_MODRM_RM_MASK))
13217 {
13218 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13219 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13220 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13221 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13222 case 4: /* SIB */
13223 {
13224 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13225
13226 /* Get the index and scale it. */
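/* SIB recap: bits 7:6 = scale (1/2/4/8), bits 5:3 = index, bits 2:0 = base;
   an index value of 4 means no index register. */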
13227 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13228 {
13229 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13230 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13231 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13232 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13233 case 4: u32EffAddr = 0; /* none */ break;
13234 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13235 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13236 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13237 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13238 }
13239 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13240
13241 /* add base */
13242 switch (bSib & X86_SIB_BASE_MASK)
13243 {
13244 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13245 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13246 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13247 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13248 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13249 case 5:
13250 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13251 {
13252 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13253 SET_SS_DEF();
13254 }
13255 else
13256 {
13257 uint32_t u32Disp;
13258 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13259 u32EffAddr += u32Disp;
13260 }
13261 break;
13262 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13263 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13264 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13265 }
13266 break;
13267 }
13268 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13269 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13270 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13271 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13272 }
13273
13274 /* Get and add the displacement. */
13275 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13276 {
13277 case 0:
13278 break;
13279 case 1:
13280 {
13281 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13282 u32EffAddr += i8Disp;
13283 break;
13284 }
13285 case 2:
13286 {
13287 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13288 u32EffAddr += u32Disp;
13289 break;
13290 }
13291 default:
13292 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13293 }
13294
13295 }
13296 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13297 *pGCPtrEff = u32EffAddr;
13298 else
13299 {
13300 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13301 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13302 }
13303 }
13304 }
13305 else
13306 {
13307 uint64_t u64EffAddr;
13308
13309 /* Handle the rip+disp32 form with no registers first. */
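/* The disp32 is relative to the end of the instruction, so the number of
   opcode bytes fetched so far plus the size of the trailing immediate
   (cbImm) is added on top of RIP. */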
13310 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13311 {
13312 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13313 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13314 }
13315 else
13316 {
13317 /* Get the register (or SIB) value. */
13318 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13319 {
13320 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13321 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13322 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13323 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13324 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13325 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13326 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13327 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13328 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13329 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13330 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13331 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13332 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13333 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13334 /* SIB */
13335 case 4:
13336 case 12:
13337 {
13338 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13339
13340 /* Get the index and scale it. */
13341 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13342 {
13343 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13344 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13345 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13346 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13347 case 4: u64EffAddr = 0; /* none */ break;
13348 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13349 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13350 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13351 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13352 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13353 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13354 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13355 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13356 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13357 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13358 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13359 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13360 }
13361 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13362
13363 /* add base */
13364 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13365 {
13366 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13367 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13368 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13369 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13370 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13371 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13372 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13373 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13374 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13375 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13376 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13377 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13378 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13379 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13380 /* complicated encodings */
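/* Base 5/13 is special: with mod == 0 there is no base register, only a
   disp32; otherwise the base is RBP (REX.B clear, SS default) or R13. */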
13381 case 5:
13382 case 13:
13383 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13384 {
13385 if (!pVCpu->iem.s.uRexB)
13386 {
13387 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13388 SET_SS_DEF();
13389 }
13390 else
13391 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13392 }
13393 else
13394 {
13395 uint32_t u32Disp;
13396 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13397 u64EffAddr += (int32_t)u32Disp;
13398 }
13399 break;
13400 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13401 }
13402 break;
13403 }
13404 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13405 }
13406
13407 /* Get and add the displacement. */
13408 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13409 {
13410 case 0:
13411 break;
13412 case 1:
13413 {
13414 int8_t i8Disp;
13415 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13416 u64EffAddr += i8Disp;
13417 break;
13418 }
13419 case 2:
13420 {
13421 uint32_t u32Disp;
13422 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13423 u64EffAddr += (int32_t)u32Disp;
13424 break;
13425 }
13426 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13427 }
13428
13429 }
13430
13431 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13432 *pGCPtrEff = u64EffAddr;
13433 else
13434 {
13435 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13436 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13437 }
13438 }
13439
13440 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13441 return VINF_SUCCESS;
13442}
13443
13444
13445/**
13446 * Calculates the effective address of a ModR/M memory operand.
13447 *
13448 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13449 *
13450 * @return Strict VBox status code.
13451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13452 * @param bRm The ModRM byte.
13453 * @param cbImm The size of any immediate following the
13454 * effective address opcode bytes. Important for
13455 * RIP relative addressing.
13456 * @param pGCPtrEff Where to return the effective address.
13457 * @param offRsp Additional displacement applied when RSP/ESP is used as the SIB base register.
13458 */
13459IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13460{
13461 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
13462# define SET_SS_DEF() \
13463 do \
13464 { \
13465 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13466 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13467 } while (0)
13468
13469 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13470 {
13471/** @todo Check the effective address size crap! */
13472 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13473 {
13474 uint16_t u16EffAddr;
13475
13476 /* Handle the disp16 form with no registers first. */
13477 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13478 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13479 else
13480 {
13481 /* Get the displacement. */
13482 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13483 {
13484 case 0: u16EffAddr = 0; break;
13485 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13486 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13487 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13488 }
13489
13490 /* Add the base and index registers to the disp. */
13491 switch (bRm & X86_MODRM_RM_MASK)
13492 {
13493 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13494 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13495 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13496 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13497 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13498 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13499 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13500 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13501 }
13502 }
13503
13504 *pGCPtrEff = u16EffAddr;
13505 }
13506 else
13507 {
13508 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13509 uint32_t u32EffAddr;
13510
13511 /* Handle the disp32 form with no registers first. */
13512 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13513 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13514 else
13515 {
13516 /* Get the register (or SIB) value. */
13517 switch ((bRm & X86_MODRM_RM_MASK))
13518 {
13519 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13520 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13521 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13522 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13523 case 4: /* SIB */
13524 {
13525 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13526
13527 /* Get the index and scale it. */
13528 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13529 {
13530 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13531 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13532 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13533 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13534 case 4: u32EffAddr = 0; /* none */ break;
13535 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13536 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13537 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13538 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13539 }
13540 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13541
13542 /* add base */
13543 switch (bSib & X86_SIB_BASE_MASK)
13544 {
13545 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13546 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13547 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13548 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13549 case 4:
13550 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13551 SET_SS_DEF();
13552 break;
13553 case 5:
13554 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13555 {
13556 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13557 SET_SS_DEF();
13558 }
13559 else
13560 {
13561 uint32_t u32Disp;
13562 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13563 u32EffAddr += u32Disp;
13564 }
13565 break;
13566 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13567 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13568 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13569 }
13570 break;
13571 }
13572 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13573 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13574 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13575 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13576 }
13577
13578 /* Get and add the displacement. */
13579 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13580 {
13581 case 0:
13582 break;
13583 case 1:
13584 {
13585 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13586 u32EffAddr += i8Disp;
13587 break;
13588 }
13589 case 2:
13590 {
13591 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13592 u32EffAddr += u32Disp;
13593 break;
13594 }
13595 default:
13596 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13597 }
13598
13599 }
13600 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13601 *pGCPtrEff = u32EffAddr;
13602 else
13603 {
13604 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13605 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13606 }
13607 }
13608 }
13609 else
13610 {
13611 uint64_t u64EffAddr;
13612
13613 /* Handle the rip+disp32 form with no registers first. */
13614 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13615 {
13616 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13617 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13618 }
13619 else
13620 {
13621 /* Get the register (or SIB) value. */
13622 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13623 {
13624 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13625 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13626 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13627 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13628 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13629 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13630 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13631 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13632 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13633 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13634 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13635 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13636 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13637 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13638 /* SIB */
13639 case 4:
13640 case 12:
13641 {
13642 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13643
13644 /* Get the index and scale it. */
13645 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13646 {
13647 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13648 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13649 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13650 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13651 case 4: u64EffAddr = 0; /* none */ break;
13652 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13653 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13654 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13655 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13656 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13657 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13658 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13659 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13660 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13661 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13662 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13663 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13664 }
13665 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13666
13667 /* add base */
13668 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13669 {
13670 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13671 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13672 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13673 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13674 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13675 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13676 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13677 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13678 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13679 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13680 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13681 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13682 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13683 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13684 /* complicated encodings */
13685 case 5:
13686 case 13:
13687 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13688 {
13689 if (!pVCpu->iem.s.uRexB)
13690 {
13691 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13692 SET_SS_DEF();
13693 }
13694 else
13695 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13696 }
13697 else
13698 {
13699 uint32_t u32Disp;
13700 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13701 u64EffAddr += (int32_t)u32Disp;
13702 }
13703 break;
13704 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13705 }
13706 break;
13707 }
13708 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13709 }
13710
13711 /* Get and add the displacement. */
13712 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13713 {
13714 case 0:
13715 break;
13716 case 1:
13717 {
13718 int8_t i8Disp;
13719 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13720 u64EffAddr += i8Disp;
13721 break;
13722 }
13723 case 2:
13724 {
13725 uint32_t u32Disp;
13726 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13727 u64EffAddr += (int32_t)u32Disp;
13728 break;
13729 }
13730 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13731 }
13732
13733 }
13734
13735 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13736 *pGCPtrEff = u64EffAddr;
13737 else
13738 {
13739 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13740 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13741 }
13742 }
13743
13744 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13745 return VINF_SUCCESS;
13746}
13747
13748
13749#ifdef IEM_WITH_SETJMP
13750/**
13751 * Calculates the effective address of a ModR/M memory operand.
13752 *
13753 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13754 *
13755 * May longjmp on internal error.
13756 *
13757 * @return The effective address.
13758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13759 * @param bRm The ModRM byte.
13760 * @param cbImm The size of any immediate following the
13761 * effective address opcode bytes. Important for
13762 * RIP relative addressing.
13763 */
13764IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
13765{
13766 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13767# define SET_SS_DEF() \
13768 do \
13769 { \
13770 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13771 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13772 } while (0)
13773
13774 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13775 {
13776/** @todo Check the effective address size crap! */
13777 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13778 {
13779 uint16_t u16EffAddr;
13780
13781 /* Handle the disp16 form with no registers first. */
13782 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13783 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13784 else
13785 {
13786 /* Get the displacement. */
13787 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13788 {
13789 case 0: u16EffAddr = 0; break;
13790 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13791 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13792 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13793 }
13794
13795 /* Add the base and index registers to the disp. */
13796 switch (bRm & X86_MODRM_RM_MASK)
13797 {
13798 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13799 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13800 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13801 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13802 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13803 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13804 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13805 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13806 }
13807 }
13808
13809 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13810 return u16EffAddr;
13811 }
13812
13813 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13814 uint32_t u32EffAddr;
13815
13816 /* Handle the disp32 form with no registers first. */
13817 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13818 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13819 else
13820 {
13821 /* Get the register (or SIB) value. */
13822 switch ((bRm & X86_MODRM_RM_MASK))
13823 {
13824 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13825 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13826 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13827 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13828 case 4: /* SIB */
13829 {
13830 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13831
13832 /* Get the index and scale it. */
13833 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13834 {
13835 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13836 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13837 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13838 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13839 case 4: u32EffAddr = 0; /* none */ break;
13840 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13841 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13842 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13843 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13844 }
13845 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13846
13847 /* add base */
13848 switch (bSib & X86_SIB_BASE_MASK)
13849 {
13850 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13851 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13852 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13853 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13854 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13855 case 5:
13856 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13857 {
13858 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13859 SET_SS_DEF();
13860 }
13861 else
13862 {
13863 uint32_t u32Disp;
13864 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13865 u32EffAddr += u32Disp;
13866 }
13867 break;
13868 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13869 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13870 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13871 }
13872 break;
13873 }
13874 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13875 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13876 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13877 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13878 }
13879
13880 /* Get and add the displacement. */
13881 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13882 {
13883 case 0:
13884 break;
13885 case 1:
13886 {
13887 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13888 u32EffAddr += i8Disp;
13889 break;
13890 }
13891 case 2:
13892 {
13893 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13894 u32EffAddr += u32Disp;
13895 break;
13896 }
13897 default:
13898 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13899 }
13900 }
13901
13902 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13903 {
13904 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13905 return u32EffAddr;
13906 }
13907 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13908 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13909 return u32EffAddr & UINT16_MAX;
13910 }
13911
13912 uint64_t u64EffAddr;
13913
13914 /* Handle the rip+disp32 form with no registers first. */
13915 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13916 {
13917 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13918 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13919 }
13920 else
13921 {
13922 /* Get the register (or SIB) value. */
13923 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13924 {
13925 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13926 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13927 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13928 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13929 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13930 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13931 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13932 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13933 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13934 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13935 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13936 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13937 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13938 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13939 /* SIB */
13940 case 4:
13941 case 12:
13942 {
13943 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13944
13945 /* Get the index and scale it. */
13946 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13947 {
13948 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13949 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13950 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13951 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13952 case 4: u64EffAddr = 0; /* none */ break;
13953 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13954 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13955 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13956 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13957 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13958 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13959 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13960 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13961 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13962 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13963 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13964 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13965 }
13966 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13967
13968 /* add base */
13969 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13970 {
13971 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13972 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13973 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13974 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13975 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13976 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13977 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13978 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13979 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13980 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13981 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13982 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13983 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13984 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13985 /* complicated encodings */
13986 case 5:
13987 case 13:
13988 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13989 {
13990 if (!pVCpu->iem.s.uRexB)
13991 {
13992 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13993 SET_SS_DEF();
13994 }
13995 else
13996 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13997 }
13998 else
13999 {
14000 uint32_t u32Disp;
14001 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
14002 u64EffAddr += (int32_t)u32Disp;
14003 }
14004 break;
14005 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
14006 }
14007 break;
14008 }
14009 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
14010 }
14011
14012 /* Get and add the displacement. */
14013 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
14014 {
14015 case 0:
14016 break;
14017 case 1:
14018 {
14019 int8_t i8Disp;
14020 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
14021 u64EffAddr += i8Disp;
14022 break;
14023 }
14024 case 2:
14025 {
14026 uint32_t u32Disp;
14027 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
14028 u64EffAddr += (int32_t)u32Disp;
14029 break;
14030 }
14031 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
14032 }
14033
14034 }
14035
14036 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
14037 {
14038 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
14039 return u64EffAddr;
14040 }
14041 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
14042 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
14043 return u64EffAddr & UINT32_MAX;
14044}
14045#endif /* IEM_WITH_SETJMP */
14046
14047/** @} */
14048
14049
14050
14051/*
14052 * Include the instructions
14053 */
14054#include "IEMAllInstructions.cpp.h"
14055
14056
14057
14058#ifdef LOG_ENABLED
14059/**
14060 * Logs the current instruction.
14061 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14062 * @param fSameCtx Set if we have the same context information as the VMM,
14063 * clear if we may have already executed an instruction in
14064 * our debug context. When clear, we assume IEMCPU holds
14065 * valid CPU mode info.
14066 *
14067 * The @a fSameCtx parameter is now misleading and obsolete.
14068 * @param pszFunction The IEM function doing the execution.
14069 */
14070IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
14071{
14072# ifdef IN_RING3
14073 if (LogIs2Enabled())
14074 {
14075 char szInstr[256];
14076 uint32_t cbInstr = 0;
14077 if (fSameCtx)
14078 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14079 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14080 szInstr, sizeof(szInstr), &cbInstr);
14081 else
14082 {
14083 uint32_t fFlags = 0;
14084 switch (pVCpu->iem.s.enmCpuMode)
14085 {
14086 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14087 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14088 case IEMMODE_16BIT:
14089 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
14090 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14091 else
14092 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14093 break;
14094 }
14095 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
14096 szInstr, sizeof(szInstr), &cbInstr);
14097 }
14098
14099 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
14100 Log2(("**** %s\n"
14101 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14102 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14103 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14104 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14105 " %s\n"
14106 , pszFunction,
14107 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
14108 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
14109 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
14110 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
14111 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14112 szInstr));
14113
14114 if (LogIs3Enabled())
14115 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14116 }
14117 else
14118# endif
14119 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
14120 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
14121 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
14122}
14123#endif /* LOG_ENABLED */
14124
14125
14126#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14127/**
14128 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
14129 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
14130 *
14131 * @returns Modified rcStrict.
14132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14133 * @param rcStrict The instruction execution status.
14134 */
14135static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14136{
14137 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
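/*
 * Priority recap (see the individual comments below): TPR-below-threshold /
 * APIC-write emulation ranks highest, then MTF, then the VMX-preemption
 * timer, and finally the NMI-window and interrupt-window VM-exits.
 */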
14138 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
14139 {
14140 /* VMX preemption timer takes priority over NMI-window exits. */
14141 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14142 {
14143 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14144 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14145 }
14146 /*
14147 * Check remaining intercepts.
14148 *
14149 * NMI-window and Interrupt-window VM-exits.
14150 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
14151 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
14152 *
14153 * See Intel spec. 26.7.6 "NMI-Window Exiting".
14154 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
14155 */
14156 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
14157 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14158 && !TRPMHasTrap(pVCpu))
14159 {
14160 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
14161 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
14162 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
14163 {
14164 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
14165 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
14166 }
14167 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
14168 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
14169 {
14170 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
14171 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
14172 }
14173 }
14174 }
14175 /* TPR-below threshold/APIC write has the highest priority. */
14176 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14177 {
14178 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14179 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14180 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14181 }
14182 /* MTF takes priority over VMX-preemption timer. */
14183 else
14184 {
14185 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
14186 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14187 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14188 }
14189 return rcStrict;
14190}
14191#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
14192
14193
14194/**
14195 * Makes status code adjustments (pass up from I/O and access handlers)
14196 * as well as maintaining statistics.
14197 *
14198 * @returns Strict VBox status code to pass up.
14199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14200 * @param rcStrict The status from executing an instruction.
14201 */
14202DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14203{
14204 if (rcStrict != VINF_SUCCESS)
14205 {
14206 if (RT_SUCCESS(rcStrict))
14207 {
14208 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14209 || rcStrict == VINF_IOM_R3_IOPORT_READ
14210 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14211 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14212 || rcStrict == VINF_IOM_R3_MMIO_READ
14213 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14214 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14215 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14216 || rcStrict == VINF_CPUM_R3_MSR_READ
14217 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14218 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14219 || rcStrict == VINF_EM_RAW_TO_R3
14220 || rcStrict == VINF_EM_TRIPLE_FAULT
14221 || rcStrict == VINF_GIM_R3_HYPERCALL
14222 /* raw-mode / virt handlers only: */
14223 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14224 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14225 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14226 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14227 || rcStrict == VINF_SELM_SYNC_GDT
14228 || rcStrict == VINF_CSAM_PENDING_ACTION
14229 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14230 /* nested hw.virt codes: */
14231 || rcStrict == VINF_VMX_VMEXIT
14232 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
14233 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
14234 || rcStrict == VINF_SVM_VMEXIT
14235 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14236/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
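/* Fold in any status a handler asked to pass up: a status outside the
   VINF_EM range always wins, while inside that range the numerically
   smaller (i.e. more urgent) status takes precedence. */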
14237 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14238#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14239 if ( rcStrict == VINF_VMX_VMEXIT
14240 && rcPassUp == VINF_SUCCESS)
14241 rcStrict = VINF_SUCCESS;
14242 else
14243#endif
14244#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14245 if ( rcStrict == VINF_SVM_VMEXIT
14246 && rcPassUp == VINF_SUCCESS)
14247 rcStrict = VINF_SUCCESS;
14248 else
14249#endif
14250 if (rcPassUp == VINF_SUCCESS)
14251 pVCpu->iem.s.cRetInfStatuses++;
14252 else if ( rcPassUp < VINF_EM_FIRST
14253 || rcPassUp > VINF_EM_LAST
14254 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14255 {
14256 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14257 pVCpu->iem.s.cRetPassUpStatus++;
14258 rcStrict = rcPassUp;
14259 }
14260 else
14261 {
14262 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14263 pVCpu->iem.s.cRetInfStatuses++;
14264 }
14265 }
14266 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14267 pVCpu->iem.s.cRetAspectNotImplemented++;
14268 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14269 pVCpu->iem.s.cRetInstrNotImplemented++;
14270 else
14271 pVCpu->iem.s.cRetErrStatuses++;
14272 }
14273 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14274 {
14275 pVCpu->iem.s.cRetPassUpStatus++;
14276 rcStrict = pVCpu->iem.s.rcPassUp;
14277 }
14278
14279 return rcStrict;
14280}
14281
14282
14283/**
14284 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14285 * IEMExecOneWithPrefetchedByPC.
14286 *
14287 * Similar code is found in IEMExecLots.
14288 *
14289 * @return Strict VBox status code.
14290 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14291 * @param fExecuteInhibit If set, execute the instruction following CLI,
14292 * POP SS and MOV SS,GR.
14293 * @param pszFunction The calling function name.
14294 */
14295DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
14296{
14297 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14298 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14299 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14300 RT_NOREF_PV(pszFunction);
14301
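/*
 * Fetch and dispatch the first opcode byte.  When IEM_WITH_SETJMP is in
 * effect, the fetch/execute path may bail out via longjmp (typically on
 * memory access trouble) and the status code comes back through setjmp.
 */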
14302#ifdef IEM_WITH_SETJMP
14303 VBOXSTRICTRC rcStrict;
14304 jmp_buf JmpBuf;
14305 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14306 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14307 if ((rcStrict = setjmp(JmpBuf)) == 0)
14308 {
14309 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14310 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14311 }
14312 else
14313 pVCpu->iem.s.cLongJumps++;
14314 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14315#else
14316 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14317 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14318#endif
14319 if (rcStrict == VINF_SUCCESS)
14320 pVCpu->iem.s.cInstructions++;
14321 if (pVCpu->iem.s.cActiveMappings > 0)
14322 {
14323 Assert(rcStrict != VINF_SUCCESS);
14324 iemMemRollback(pVCpu);
14325 }
14326 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14327 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14328 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14329
14330//#ifdef DEBUG
14331// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14332//#endif
14333
14334#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14335 /*
14336 * Perform any VMX nested-guest instruction boundary actions.
14337 *
14338 * If any of these causes a VM-exit, we must skip executing the next
14339 * instruction (would run into stale page tables). A VM-exit makes sure
14340 * there is no interrupt-inhibition, so that should ensure we don't go on
14341 * to try to execute the next instruction. Clearing fExecuteInhibit is
14342 * problematic because of the setjmp/longjmp clobbering above.
14343 */
14344 if ( rcStrict == VINF_SUCCESS
14345 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
14346 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
14347 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
14348#endif
14349
14350 /* Execute the next instruction as well if a cli, pop ss or
14351 mov ss, Gr has just completed successfully. */
14352 if ( fExecuteInhibit
14353 && rcStrict == VINF_SUCCESS
14354 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14355 && EMIsInhibitInterruptsActive(pVCpu))
14356 {
14357 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
14358 if (rcStrict == VINF_SUCCESS)
14359 {
14360#ifdef LOG_ENABLED
14361 iemLogCurInstr(pVCpu, false, pszFunction);
14362#endif
14363#ifdef IEM_WITH_SETJMP
14364 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14365 if ((rcStrict = setjmp(JmpBuf)) == 0)
14366 {
14367 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14368 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14369 }
14370 else
14371 pVCpu->iem.s.cLongJumps++;
14372 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14373#else
14374 IEM_OPCODE_GET_NEXT_U8(&b);
14375 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14376#endif
14377 if (rcStrict == VINF_SUCCESS)
14378 pVCpu->iem.s.cInstructions++;
14379 if (pVCpu->iem.s.cActiveMappings > 0)
14380 {
14381 Assert(rcStrict != VINF_SUCCESS);
14382 iemMemRollback(pVCpu);
14383 }
14384 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14385 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14386 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14387 }
14388 else if (pVCpu->iem.s.cActiveMappings > 0)
14389 iemMemRollback(pVCpu);
14390 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
14391 }
14392
14393 /*
14394 * Return value fiddling, statistics and sanity assertions.
14395 */
14396 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14397
14398 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14399 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14400 return rcStrict;
14401}
14402
14403
14404/**
14405 * Execute one instruction.
14406 *
14407 * @return Strict VBox status code.
14408 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14409 */
14410VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14411{
14412 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
14413#ifdef LOG_ENABLED
14414 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14415#endif
14416
14417 /*
14418 * Do the decoding and emulation.
14419 */
14420 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14421 if (rcStrict == VINF_SUCCESS)
14422 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14423 else if (pVCpu->iem.s.cActiveMappings > 0)
14424 iemMemRollback(pVCpu);
14425
14426 if (rcStrict != VINF_SUCCESS)
14427 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14428 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14429 return rcStrict;
14430}
14431
14432
14433VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14434{
14435 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14436
14437 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14438 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14439 if (rcStrict == VINF_SUCCESS)
14440 {
14441 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14442 if (pcbWritten)
14443 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14444 }
14445 else if (pVCpu->iem.s.cActiveMappings > 0)
14446 iemMemRollback(pVCpu);
14447
14448 return rcStrict;
14449}
14450
14451
14452VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14453 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14454{
14455 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14456
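/*
 * If the caller supplied opcode bytes matching the current RIP, seed the
 * decoder directly from that buffer instead of prefetching from guest memory.
 */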
14457 VBOXSTRICTRC rcStrict;
14458 if ( cbOpcodeBytes
14459 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14460 {
14461 iemInitDecoder(pVCpu, false, false);
14462#ifdef IEM_WITH_CODE_TLB
14463 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14464 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14465 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14466 pVCpu->iem.s.offCurInstrStart = 0;
14467 pVCpu->iem.s.offInstrNextByte = 0;
14468#else
14469 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14470 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14471#endif
14472 rcStrict = VINF_SUCCESS;
14473 }
14474 else
14475 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14476 if (rcStrict == VINF_SUCCESS)
14477 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14478 else if (pVCpu->iem.s.cActiveMappings > 0)
14479 iemMemRollback(pVCpu);
14480
14481 return rcStrict;
14482}
14483
14484
14485VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14486{
14487 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14488
14489 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14490 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14491 if (rcStrict == VINF_SUCCESS)
14492 {
14493 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14494 if (pcbWritten)
14495 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14496 }
14497 else if (pVCpu->iem.s.cActiveMappings > 0)
14498 iemMemRollback(pVCpu);
14499
14500 return rcStrict;
14501}
14502
14503
14504VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14505 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14506{
14507 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14508
14509 VBOXSTRICTRC rcStrict;
14510 if ( cbOpcodeBytes
14511 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14512 {
14513 iemInitDecoder(pVCpu, true, false);
14514#ifdef IEM_WITH_CODE_TLB
14515 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14516 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14517 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14518 pVCpu->iem.s.offCurInstrStart = 0;
14519 pVCpu->iem.s.offInstrNextByte = 0;
14520#else
14521 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14522 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14523#endif
14524 rcStrict = VINF_SUCCESS;
14525 }
14526 else
14527 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14528 if (rcStrict == VINF_SUCCESS)
14529 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14530 else if (pVCpu->iem.s.cActiveMappings > 0)
14531 iemMemRollback(pVCpu);
14532
14533 return rcStrict;
14534}
14535
14536
14537/**
14538 * For debugging DISGetParamSize; may come in handy.
14539 *
14540 * @returns Strict VBox status code.
14541 * @param pVCpu The cross context virtual CPU structure of the
14542 * calling EMT.
14543 * @param pCtxCore The context core structure.
14544 * @param OpcodeBytesPC The PC of the opcode bytes.
14545 * @param pvOpcodeBytes Prefetched opcode bytes.
14546 * @param cbOpcodeBytes Number of prefetched bytes.
14547 * @param pcbWritten Where to return the number of bytes written.
14548 * Optional.
14549 */
14550VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14551 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14552 uint32_t *pcbWritten)
14553{
14554 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14555
14556 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14557 VBOXSTRICTRC rcStrict;
14558 if ( cbOpcodeBytes
14559 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14560 {
14561 iemInitDecoder(pVCpu, true, false);
14562#ifdef IEM_WITH_CODE_TLB
14563 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14564 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14565 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14566 pVCpu->iem.s.offCurInstrStart = 0;
14567 pVCpu->iem.s.offInstrNextByte = 0;
14568#else
14569 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14570 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14571#endif
14572 rcStrict = VINF_SUCCESS;
14573 }
14574 else
14575 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14576 if (rcStrict == VINF_SUCCESS)
14577 {
14578 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14579 if (pcbWritten)
14580 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14581 }
14582 else if (pVCpu->iem.s.cActiveMappings > 0)
14583 iemMemRollback(pVCpu);
14584
14585 return rcStrict;
14586}
14587
14588
14589/**
14590 * For handling split cacheline lock operations when the host has split-lock
14591 * detection enabled.
14592 *
14593 * This will cause the interpreter to disregard the lock prefix and implicit
14594 * locking (xchg).
14595 *
14596 * @returns Strict VBox status code.
14597 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14598 */
14599VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
14600{
14601 /*
14602 * Do the decoding and emulation.
14603 */
14604 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
14605 if (rcStrict == VINF_SUCCESS)
14606 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
14607 else if (pVCpu->iem.s.cActiveMappings > 0)
14608 iemMemRollback(pVCpu);
14609
14610 if (rcStrict != VINF_SUCCESS)
14611 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14612 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14613 return rcStrict;
14614}
14615
14616
14617VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14618{
14619 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14620 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14621
14622 /*
14623 * See if there is an interrupt pending in TRPM, inject it if we can.
14624 */
14625 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
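/* GIF must be set; beyond that, physical interrupt delivery is gated by the
   relevant VMX/SVM control when in nested-guest mode, or plain EFLAGS.IF
   otherwise. */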
14626#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14627 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14628 if (fIntrEnabled)
14629 {
14630 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14631 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14632 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14633 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
14634 else
14635 {
14636 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14637 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14638 }
14639 }
14640#else
14641 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14642#endif
14643
14644 /** @todo What if we are injecting an exception and not an interrupt? Is that
14645 * possible here? For now we assert it is indeed only an interrupt. */
14646 if ( fIntrEnabled
14647 && TRPMHasTrap(pVCpu)
14648 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14649 {
14650 uint8_t u8TrapNo;
14651 TRPMEVENT enmType;
14652 uint32_t uErrCode;
14653 RTGCPTR uCr2;
14654 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
14655 AssertRC(rc2);
14656 Assert(enmType == TRPM_HARDWARE_INT);
14657 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14658 TRPMResetTrap(pVCpu);
14659#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14660 /* Injecting an event may cause a VM-exit. */
14661 if ( rcStrict != VINF_SUCCESS
14662 && rcStrict != VINF_IEM_RAISED_XCPT)
14663 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14664#else
14665 NOREF(rcStrict);
14666#endif
14667 }
14668
14669 /*
14670 * Initial decoder init w/ prefetch, then set up setjmp.
14671 */
14672 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14673 if (rcStrict == VINF_SUCCESS)
14674 {
14675#ifdef IEM_WITH_SETJMP
14676 jmp_buf JmpBuf;
14677 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14678 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14679 pVCpu->iem.s.cActiveMappings = 0;
14680 if ((rcStrict = setjmp(JmpBuf)) == 0)
14681#endif
14682 {
14683 /*
14684 * The run loop. We limit ourselves to the caller's maximum instruction count.
14685 */
14686 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14687 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14688 for (;;)
14689 {
14690 /*
14691 * Log the state.
14692 */
14693#ifdef LOG_ENABLED
14694 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14695#endif
14696
14697 /*
14698 * Do the decoding and emulation.
14699 */
14700 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14701 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14702 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14703 {
14704 Assert(pVCpu->iem.s.cActiveMappings == 0);
14705 pVCpu->iem.s.cInstructions++;
14706 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14707 {
14708 uint64_t fCpu = pVCpu->fLocalForcedActions
14709 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14710 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14711 | VMCPU_FF_TLB_FLUSH
14712 | VMCPU_FF_INHIBIT_INTERRUPTS
14713 | VMCPU_FF_BLOCK_NMIS
14714 | VMCPU_FF_UNHALT ));
14715
14716 if (RT_LIKELY( ( !fCpu
14717 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14718 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14719 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14720 {
14721 if (cMaxInstructionsGccStupidity-- > 0)
14722 {
14723 /* Poll timers every now and then according to the caller's specs. */
14724 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14725 || !TMTimerPollBool(pVM, pVCpu))
14726 {
14727 Assert(pVCpu->iem.s.cActiveMappings == 0);
14728 iemReInitDecoder(pVCpu);
14729 continue;
14730 }
14731 }
14732 }
14733 }
14734 Assert(pVCpu->iem.s.cActiveMappings == 0);
14735 }
14736 else if (pVCpu->iem.s.cActiveMappings > 0)
14737 iemMemRollback(pVCpu);
14738 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14739 break;
14740 }
14741 }
14742#ifdef IEM_WITH_SETJMP
14743 else
14744 {
14745 if (pVCpu->iem.s.cActiveMappings > 0)
14746 iemMemRollback(pVCpu);
14747# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14748 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14749# endif
14750 pVCpu->iem.s.cLongJumps++;
14751 }
14752 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14753#endif
14754
14755 /*
14756 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14757 */
14758 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14759 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14760 }
14761 else
14762 {
14763 if (pVCpu->iem.s.cActiveMappings > 0)
14764 iemMemRollback(pVCpu);
14765
14766#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14767 /*
14768 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14769 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14770 */
14771 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14772#endif
14773 }
14774
14775 /*
14776 * Maybe re-enter raw-mode and log.
14777 */
14778 if (rcStrict != VINF_SUCCESS)
14779 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14780 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14781 if (pcInstructions)
14782 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14783 return rcStrict;
14784}
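
/*
 * Usage sketch (illustrative only): the poll rate passed to IEMExecLots must be a
 * power of two minus one (see the assertion at the top of the function), since it
 * is used as a mask on the remaining-instruction counter.  The concrete limits
 * below are assumptions of the example.
 */
#if 0
static VBOXSTRICTRC iemExampleExecLots(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    /* Execute up to 4096 instructions, polling timers roughly every 512 instructions. */
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("iemExampleExecLots: executed %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif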
14785
14786
14787/**
14788 * Interface used by EMExecuteExec, does exit statistics and limits.
14789 *
14790 * @returns Strict VBox status code.
14791 * @param pVCpu The cross context virtual CPU structure.
14792 * @param fWillExit To be defined.
14793 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14794 * @param cMaxInstructions Maximum number of instructions to execute.
14795 * @param cMaxInstructionsWithoutExits
14796 * The max number of instructions without exits.
14797 * @param pStats Where to return statistics.
14798 */
14799VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14800 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14801{
14802 NOREF(fWillExit); /** @todo define flexible exit crits */
14803
14804 /*
14805 * Initialize return stats.
14806 */
14807 pStats->cInstructions = 0;
14808 pStats->cExits = 0;
14809 pStats->cMaxExitDistance = 0;
14810 pStats->cReserved = 0;
14811
14812 /*
14813 * Initial decoder init w/ prefetch, then set up setjmp.
14814 */
14815 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14816 if (rcStrict == VINF_SUCCESS)
14817 {
14818#ifdef IEM_WITH_SETJMP
14819 jmp_buf JmpBuf;
14820 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14821 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14822 pVCpu->iem.s.cActiveMappings = 0;
14823 if ((rcStrict = setjmp(JmpBuf)) == 0)
14824#endif
14825 {
14826#ifdef IN_RING0
14827 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14828#endif
14829 uint32_t cInstructionSinceLastExit = 0;
14830
14831 /*
14832 * The run loop. We limit ourselves to the caller's maximum instruction count.
14833 */
14834 PVM pVM = pVCpu->CTX_SUFF(pVM);
14835 for (;;)
14836 {
14837 /*
14838 * Log the state.
14839 */
14840#ifdef LOG_ENABLED
14841 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14842#endif
14843
14844 /*
14845 * Do the decoding and emulation.
14846 */
14847 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14848
14849 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14850 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14851
14852 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14853 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14854 {
14855 pStats->cExits += 1;
14856 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14857 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14858 cInstructionSinceLastExit = 0;
14859 }
14860
14861 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14862 {
14863 Assert(pVCpu->iem.s.cActiveMappings == 0);
14864 pVCpu->iem.s.cInstructions++;
14865 pStats->cInstructions++;
14866 cInstructionSinceLastExit++;
14867 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14868 {
14869 uint64_t fCpu = pVCpu->fLocalForcedActions
14870 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14871 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14872 | VMCPU_FF_TLB_FLUSH
14873 | VMCPU_FF_INHIBIT_INTERRUPTS
14874 | VMCPU_FF_BLOCK_NMIS
14875 | VMCPU_FF_UNHALT ));
14876
14877 if (RT_LIKELY( ( ( !fCpu
14878 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14879 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14880 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14881 || pStats->cInstructions < cMinInstructions))
14882 {
14883 if (pStats->cInstructions < cMaxInstructions)
14884 {
14885 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14886 {
14887#ifdef IN_RING0
14888 if ( !fCheckPreemptionPending
14889 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14890#endif
14891 {
14892 Assert(pVCpu->iem.s.cActiveMappings == 0);
14893 iemReInitDecoder(pVCpu);
14894 continue;
14895 }
14896#ifdef IN_RING0
14897 rcStrict = VINF_EM_RAW_INTERRUPT;
14898 break;
14899#endif
14900 }
14901 }
14902 }
14903 Assert(!(fCpu & VMCPU_FF_IEM));
14904 }
14905 Assert(pVCpu->iem.s.cActiveMappings == 0);
14906 }
14907 else if (pVCpu->iem.s.cActiveMappings > 0)
14908 iemMemRollback(pVCpu);
14909 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14910 break;
14911 }
14912 }
14913#ifdef IEM_WITH_SETJMP
14914 else
14915 {
14916 if (pVCpu->iem.s.cActiveMappings > 0)
14917 iemMemRollback(pVCpu);
14918 pVCpu->iem.s.cLongJumps++;
14919 }
14920 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14921#endif
14922
14923 /*
14924 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14925 */
14926 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14927 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14928 }
14929 else
14930 {
14931 if (pVCpu->iem.s.cActiveMappings > 0)
14932 iemMemRollback(pVCpu);
14933
14934#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14935 /*
14936 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14937 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14938 */
14939 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14940#endif
14941 }
14942
14943 /*
14944 * Maybe re-enter raw-mode and log.
14945 */
14946 if (rcStrict != VINF_SUCCESS)
14947 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14948 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14949 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14950 return rcStrict;
14951}
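
/*
 * Usage sketch (illustrative only): how a caller such as EM might invoke
 * IEMExecForExits and consume the returned statistics.  The instruction limits are
 * assumptions of the example; fWillExit is passed as 0 since its meaning is still
 * to be defined (see above).
 */
#if 0
static VBOXSTRICTRC iemExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    /* Run at least 64 and at most 2048 instructions, giving up after 512 without an exit. */
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 64 /*cMinInstructions*/,
                                            2048 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("iemExampleExecForExits: ins=%u exits=%u maxdist=%u rc=%Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif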
14952
14953
14954/**
14955 * Injects a trap, fault, abort, software interrupt or external interrupt.
14956 *
14957 * The parameter list matches TRPMQueryTrapAll pretty closely.
14958 *
14959 * @returns Strict VBox status code.
14960 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14961 * @param u8TrapNo The trap number.
14962 * @param enmType What type is it (trap/fault/abort), software
14963 * interrupt or hardware interrupt.
14964 * @param uErrCode The error code if applicable.
14965 * @param uCr2 The CR2 value if applicable.
14966 * @param cbInstr The instruction length (only relevant for
14967 * software interrupts).
14968 */
14969VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14970 uint8_t cbInstr)
14971{
14972 iemInitDecoder(pVCpu, false, false);
14973#ifdef DBGFTRACE_ENABLED
14974 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14975 u8TrapNo, enmType, uErrCode, uCr2);
14976#endif
14977
14978 uint32_t fFlags;
14979 switch (enmType)
14980 {
14981 case TRPM_HARDWARE_INT:
14982 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14983 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14984 uErrCode = uCr2 = 0;
14985 break;
14986
14987 case TRPM_SOFTWARE_INT:
14988 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14989 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14990 uErrCode = uCr2 = 0;
14991 break;
14992
14993 case TRPM_TRAP:
14994 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14995 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14996 if (u8TrapNo == X86_XCPT_PF)
14997 fFlags |= IEM_XCPT_FLAGS_CR2;
14998 switch (u8TrapNo)
14999 {
15000 case X86_XCPT_DF:
15001 case X86_XCPT_TS:
15002 case X86_XCPT_NP:
15003 case X86_XCPT_SS:
15004 case X86_XCPT_PF:
15005 case X86_XCPT_AC:
15006 case X86_XCPT_GP:
15007 fFlags |= IEM_XCPT_FLAGS_ERR;
15008 break;
15009 }
15010 break;
15011
15012 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15013 }
15014
15015 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15016
15017 if (pVCpu->iem.s.cActiveMappings > 0)
15018 iemMemRollback(pVCpu);
15019
15020 return rcStrict;
15021}
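
/*
 * Usage sketch (illustrative only): injecting a page fault via IEMInjectTrap.
 * A #PF is a CPU exception (TRPM_TRAP) that carries both an error code and a CR2
 * value, which is why the flag selection above adds IEM_XCPT_FLAGS_ERR and
 * IEM_XCPT_FLAGS_CR2 for it.  The helper name is an assumption of the example.
 */
#if 0
static VBOXSTRICTRC iemExampleInjectPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault, uint32_t uErrCode)
{
    /* cbInstr is irrelevant for hardware exceptions, so 0 is passed. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, (uint16_t)uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif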
15022
15023
15024/**
15025 * Injects the active TRPM event.
15026 *
15027 * @returns Strict VBox status code.
15028 * @param pVCpu The cross context virtual CPU structure.
15029 */
15030VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
15031{
15032#ifndef IEM_IMPLEMENTS_TASKSWITCH
15033 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15034#else
15035 uint8_t u8TrapNo;
15036 TRPMEVENT enmType;
15037 uint32_t uErrCode;
15038 RTGCUINTPTR uCr2;
15039 uint8_t cbInstr;
15040 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
15041 if (RT_FAILURE(rc))
15042 return rc;
15043
15044 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
15045 * ICEBP \#DB injection as a special case. */
15046 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15047#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15048 if (rcStrict == VINF_SVM_VMEXIT)
15049 rcStrict = VINF_SUCCESS;
15050#endif
15051#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15052 if (rcStrict == VINF_VMX_VMEXIT)
15053 rcStrict = VINF_SUCCESS;
15054#endif
15055 /** @todo Are there any other codes that imply the event was successfully
15056 * delivered to the guest? See @bugref{6607}. */
15057 if ( rcStrict == VINF_SUCCESS
15058 || rcStrict == VINF_IEM_RAISED_XCPT)
15059 TRPMResetTrap(pVCpu);
15060
15061 return rcStrict;
15062#endif
15063}
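
/*
 * Usage sketch (illustrative only): IEMInjectTrpmEvent only makes sense when TRPM
 * actually has an event queued for the calling EMT, so a caller would typically
 * guard it as below.  The helper name is an assumption of the example.
 */
#if 0
static VBOXSTRICTRC iemExampleInjectPendingEvent(PVMCPUCC pVCpu)
{
    if (TRPMHasTrap(pVCpu))
        return IEMInjectTrpmEvent(pVCpu);
    return VINF_SUCCESS;
}
#endif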
15064
15065
15066VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15067{
15068 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15069 return VERR_NOT_IMPLEMENTED;
15070}
15071
15072
15073VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15074{
15075 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15076 return VERR_NOT_IMPLEMENTED;
15077}
15078
15079
15080#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15081/**
15082 * Executes an IRET instruction with the default operand size.
15083 *
15084 * This is for PATM.
15085 *
15086 * @returns VBox status code.
15087 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15088 * @param pCtxCore The register frame.
15089 */
15090VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
15091{
15092 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15093
15094 iemCtxCoreToCtx(pCtx, pCtxCore);
15095 iemInitDecoder(pVCpu);
15096 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15097 if (rcStrict == VINF_SUCCESS)
15098 iemCtxToCtxCore(pCtxCore, pCtx);
15099 else
15100 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15101 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15102 return rcStrict;
15103}
15104#endif
15105
15106
15107/**
15108 * Macro used by the IEMExec* method to check the given instruction length.
15109 *
15110 * Will return on failure!
15111 *
15112 * @param a_cbInstr The given instruction length.
15113 * @param a_cbMin The minimum length.
15114 */
15115#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15116 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15117 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15118
15119
15120/**
15121 * Calls iemUninitExec and iemExecStatusCodeFiddling.
15122 *
15123 * (In raw-mode this also used to call iemRCRawMaybeReenter.)
15124 *
15125 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
15126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15127 * @param rcStrict The status code to fiddle.
15128 */
15129DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
15130{
15131 iemUninitExec(pVCpu);
15132 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15133}
15134
15135
15136/**
15137 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15138 *
15139 * This API ASSUMES that the caller has already verified that the guest code is
15140 * allowed to access the I/O port. (The I/O port is in the DX register in the
15141 * guest state.)
15142 *
15143 * @returns Strict VBox status code.
15144 * @param pVCpu The cross context virtual CPU structure.
15145 * @param cbValue The size of the I/O port access (1, 2, or 4).
15146 * @param enmAddrMode The addressing mode.
15147 * @param fRepPrefix Indicates whether a repeat prefix is used
15148 * (doesn't matter which for this instruction).
15149 * @param cbInstr The instruction length in bytes.
15150 * @param iEffSeg The effective segment register.
15151 * @param fIoChecked Whether the access to the I/O port has been
15152 * checked or not. It's typically checked in the
15153 * HM scenario.
15154 */
15155VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15156 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15157{
15158 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15159 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15160
15161 /*
15162 * State init.
15163 */
15164 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15165
15166 /*
15167 * Switch orgy for getting to the right handler.
15168 */
15169 VBOXSTRICTRC rcStrict;
15170 if (fRepPrefix)
15171 {
15172 switch (enmAddrMode)
15173 {
15174 case IEMMODE_16BIT:
15175 switch (cbValue)
15176 {
15177 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15178 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15179 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15180 default:
15181 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15182 }
15183 break;
15184
15185 case IEMMODE_32BIT:
15186 switch (cbValue)
15187 {
15188 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15189 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15190 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15191 default:
15192 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15193 }
15194 break;
15195
15196 case IEMMODE_64BIT:
15197 switch (cbValue)
15198 {
15199 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15200 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15201 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15202 default:
15203 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15204 }
15205 break;
15206
15207 default:
15208 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15209 }
15210 }
15211 else
15212 {
15213 switch (enmAddrMode)
15214 {
15215 case IEMMODE_16BIT:
15216 switch (cbValue)
15217 {
15218 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15219 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15220 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15221 default:
15222 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15223 }
15224 break;
15225
15226 case IEMMODE_32BIT:
15227 switch (cbValue)
15228 {
15229 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15230 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15231 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15232 default:
15233 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15234 }
15235 break;
15236
15237 case IEMMODE_64BIT:
15238 switch (cbValue)
15239 {
15240 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15241 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15242 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15243 default:
15244 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15245 }
15246 break;
15247
15248 default:
15249 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15250 }
15251 }
15252
15253 if (pVCpu->iem.s.cActiveMappings)
15254 iemMemRollback(pVCpu);
15255
15256 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15257}
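
/*
 * Usage sketch (illustrative only): how HM could hand a decoded REP OUTSB to this
 * interface after having validated the I/O port access itself.  The address size,
 * segment and instruction length below are assumptions of the example.
 */
#if 0
static VBOXSTRICTRC iemExampleRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* REP OUTSB, 32-bit address size, default DS segment, port access already checked. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif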
15258
15259
15260/**
15261 * Interface for HM and EM for executing string I/O IN (read) instructions.
15262 *
15263 * This API ASSUMES that the caller has already verified that the guest code is
15264 * allowed to access the I/O port. (The I/O port is in the DX register in the
15265 * guest state.)
15266 *
15267 * @returns Strict VBox status code.
15268 * @param pVCpu The cross context virtual CPU structure.
15269 * @param cbValue The size of the I/O port access (1, 2, or 4).
15270 * @param enmAddrMode The addressing mode.
15271 * @param fRepPrefix Indicates whether a repeat prefix is used
15272 * (doesn't matter which for this instruction).
15273 * @param cbInstr The instruction length in bytes.
15274 * @param fIoChecked Whether the access to the I/O port has been
15275 * checked or not. It's typically checked in the
15276 * HM scenario.
15277 */
15278VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15279 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15280{
15281 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15282
15283 /*
15284 * State init.
15285 */
15286 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15287
15288 /*
15289 * Switch orgy for getting to the right handler.
15290 */
15291 VBOXSTRICTRC rcStrict;
15292 if (fRepPrefix)
15293 {
15294 switch (enmAddrMode)
15295 {
15296 case IEMMODE_16BIT:
15297 switch (cbValue)
15298 {
15299 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15300 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15301 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15302 default:
15303 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15304 }
15305 break;
15306
15307 case IEMMODE_32BIT:
15308 switch (cbValue)
15309 {
15310 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15311 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15312 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15313 default:
15314 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15315 }
15316 break;
15317
15318 case IEMMODE_64BIT:
15319 switch (cbValue)
15320 {
15321 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15322 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15323 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15324 default:
15325 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15326 }
15327 break;
15328
15329 default:
15330 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15331 }
15332 }
15333 else
15334 {
15335 switch (enmAddrMode)
15336 {
15337 case IEMMODE_16BIT:
15338 switch (cbValue)
15339 {
15340 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15341 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15342 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15343 default:
15344 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15345 }
15346 break;
15347
15348 case IEMMODE_32BIT:
15349 switch (cbValue)
15350 {
15351 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15352 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15353 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15354 default:
15355 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15356 }
15357 break;
15358
15359 case IEMMODE_64BIT:
15360 switch (cbValue)
15361 {
15362 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15363 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15364 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15365 default:
15366 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15367 }
15368 break;
15369
15370 default:
15371 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15372 }
15373 }
15374
15375 if ( pVCpu->iem.s.cActiveMappings == 0
15376 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
15377 { /* likely */ }
15378 else
15379 {
15380 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
15381 iemMemRollback(pVCpu);
15382 }
15383 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15384}
15385
15386
15387/**
15388 * Interface for rawmode to execute an OUT (write) instruction.
15389 *
15390 * @returns Strict VBox status code.
15391 * @param pVCpu The cross context virtual CPU structure.
15392 * @param cbInstr The instruction length in bytes.
15393 * @param u16Port The port to write to.
15394 * @param fImm Whether the port is specified using an immediate operand or
15395 * using the implicit DX register.
15396 * @param cbReg The register size.
15397 *
15398 * @remarks In ring-0 not all of the state needs to be synced in.
15399 */
15400VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15401{
15402 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15403 Assert(cbReg <= 4 && cbReg != 3);
15404
15405 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15406 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15407 Assert(!pVCpu->iem.s.cActiveMappings);
15408 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15409}
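
/*
 * Usage sketch (illustrative only): emulating OUT DX, AL through this interface.
 * The port is taken from DX (fImm=false) and the access width is one byte.  The
 * helper name is an assumption of the example.
 */
#if 0
static VBOXSTRICTRC iemExampleOutDxAl(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    uint16_t const u16Port = (uint16_t)pVCpu->cpum.GstCtx.rdx;
    return IEMExecDecodedOut(pVCpu, cbInstr, u16Port, false /*fImm*/, 1 /*cbReg*/);
}
#endif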
15410
15411
15412/**
15413 * Interface for rawmode to execute an IN (read) instruction.
15414 *
15415 * @returns Strict VBox status code.
15416 * @param pVCpu The cross context virtual CPU structure.
15417 * @param cbInstr The instruction length in bytes.
15418 * @param u16Port The port to read.
15419 * @param fImm Whether the port is specified using an immediate operand or
15420 * using the implicit DX.
15421 * @param cbReg The register size.
15422 */
15423VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15424{
15425 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15426 Assert(cbReg <= 4 && cbReg != 3);
15427
15428 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15429 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15430 Assert(!pVCpu->iem.s.cActiveMappings);
15431 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15432}
15433
15434
15435/**
15436 * Interface for HM and EM to write to a CRx register.
15437 *
15438 * @returns Strict VBox status code.
15439 * @param pVCpu The cross context virtual CPU structure.
15440 * @param cbInstr The instruction length in bytes.
15441 * @param iCrReg The control register number (destination).
15442 * @param iGReg The general purpose register number (source).
15443 *
15444 * @remarks In ring-0 not all of the state needs to be synced in.
15445 */
15446VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15447{
15448 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15449 Assert(iCrReg < 16);
15450 Assert(iGReg < 16);
15451
15452 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15453 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15454 Assert(!pVCpu->iem.s.cActiveMappings);
15455 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15456}
15457
15458
15459/**
15460 * Interface for HM and EM to read from a CRx register.
15461 *
15462 * @returns Strict VBox status code.
15463 * @param pVCpu The cross context virtual CPU structure.
15464 * @param cbInstr The instruction length in bytes.
15465 * @param iGReg The general purpose register number (destination).
15466 * @param iCrReg The control register number (source).
15467 *
15468 * @remarks In ring-0 not all of the state needs to be synced in.
15469 */
15470VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15471{
15472 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15473 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15474 | CPUMCTX_EXTRN_APIC_TPR);
15475 Assert(iCrReg < 16);
15476 Assert(iGReg < 16);
15477
15478 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15479 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15480 Assert(!pVCpu->iem.s.cActiveMappings);
15481 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15482}
15483
15484
15485/**
15486 * Interface for HM and EM to clear the CR0[TS] bit.
15487 *
15488 * @returns Strict VBox status code.
15489 * @param pVCpu The cross context virtual CPU structure.
15490 * @param cbInstr The instruction length in bytes.
15491 *
15492 * @remarks In ring-0 not all of the state needs to be synced in.
15493 */
15494VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15495{
15496 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15497
15498 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15499 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15500 Assert(!pVCpu->iem.s.cActiveMappings);
15501 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15502}
15503
15504
15505/**
15506 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15507 *
15508 * @returns Strict VBox status code.
15509 * @param pVCpu The cross context virtual CPU structure.
15510 * @param cbInstr The instruction length in bytes.
15511 * @param uValue The value to load into CR0.
15512 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15513 * memory operand. Otherwise pass NIL_RTGCPTR.
15514 *
15515 * @remarks In ring-0 not all of the state needs to be synced in.
15516 */
15517VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15518{
15519 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15520
15521 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15522 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15523 Assert(!pVCpu->iem.s.cActiveMappings);
15524 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15525}
15526
15527
15528/**
15529 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15530 *
15531 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15532 *
15533 * @returns Strict VBox status code.
15534 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15535 * @param cbInstr The instruction length in bytes.
15536 * @remarks In ring-0 not all of the state needs to be synced in.
15537 * @thread EMT(pVCpu)
15538 */
15539VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15540{
15541 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15542
15543 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15544 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15545 Assert(!pVCpu->iem.s.cActiveMappings);
15546 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15547}
15548
15549
15550/**
15551 * Interface for HM and EM to emulate the WBINVD instruction.
15552 *
15553 * @returns Strict VBox status code.
15554 * @param pVCpu The cross context virtual CPU structure.
15555 * @param cbInstr The instruction length in bytes.
15556 *
15557 * @remarks In ring-0 not all of the state needs to be synced in.
15558 */
15559VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15560{
15561 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15562
15563 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15564 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15565 Assert(!pVCpu->iem.s.cActiveMappings);
15566 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15567}
15568
15569
15570/**
15571 * Interface for HM and EM to emulate the INVD instruction.
15572 *
15573 * @returns Strict VBox status code.
15574 * @param pVCpu The cross context virtual CPU structure.
15575 * @param cbInstr The instruction length in bytes.
15576 *
15577 * @remarks In ring-0 not all of the state needs to be synced in.
15578 */
15579VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15580{
15581 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15582
15583 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15584 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15585 Assert(!pVCpu->iem.s.cActiveMappings);
15586 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15587}
15588
15589
15590/**
15591 * Interface for HM and EM to emulate the INVLPG instruction.
15592 *
15593 * @returns Strict VBox status code.
15594 * @retval VINF_PGM_SYNC_CR3
15595 *
15596 * @param pVCpu The cross context virtual CPU structure.
15597 * @param cbInstr The instruction length in bytes.
15598 * @param GCPtrPage The effective address of the page to invalidate.
15599 *
15600 * @remarks In ring-0 not all of the state needs to be synced in.
15601 */
15602VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15603{
15604 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15605
15606 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15607 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15608 Assert(!pVCpu->iem.s.cActiveMappings);
15609 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15610}
15611
15612
15613/**
15614 * Interface for HM and EM to emulate the INVPCID instruction.
15615 *
15616 * @returns Strict VBox status code.
15617 * @retval VINF_PGM_SYNC_CR3
15618 *
15619 * @param pVCpu The cross context virtual CPU structure.
15620 * @param cbInstr The instruction length in bytes.
15621 * @param iEffSeg The effective segment register.
15622 * @param GCPtrDesc The effective address of the INVPCID descriptor.
15623 * @param uType The invalidation type.
15624 *
15625 * @remarks In ring-0 not all of the state needs to be synced in.
15626 */
15627VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
15628 uint64_t uType)
15629{
15630 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
15631
15632 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15633 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
15634 Assert(!pVCpu->iem.s.cActiveMappings);
15635 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15636}
15637
15638
15639/**
15640 * Interface for HM and EM to emulate the CPUID instruction.
15641 *
15642 * @returns Strict VBox status code.
15643 *
15644 * @param pVCpu The cross context virtual CPU structure.
15645 * @param cbInstr The instruction length in bytes.
15646 *
15647 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15648 */
15649VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15650{
15651 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15652 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15653
15654 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15655 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15656 Assert(!pVCpu->iem.s.cActiveMappings);
15657 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15658}
15659
15660
15661/**
15662 * Interface for HM and EM to emulate the RDPMC instruction.
15663 *
15664 * @returns Strict VBox status code.
15665 *
15666 * @param pVCpu The cross context virtual CPU structure.
15667 * @param cbInstr The instruction length in bytes.
15668 *
15669 * @remarks Not all of the state needs to be synced in.
15670 */
15671VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15672{
15673 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15674 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15675
15676 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15677 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15678 Assert(!pVCpu->iem.s.cActiveMappings);
15679 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15680}
15681
15682
15683/**
15684 * Interface for HM and EM to emulate the RDTSC instruction.
15685 *
15686 * @returns Strict VBox status code.
15687 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15688 *
15689 * @param pVCpu The cross context virtual CPU structure.
15690 * @param cbInstr The instruction length in bytes.
15691 *
15692 * @remarks Not all of the state needs to be synced in.
15693 */
15694VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15695{
15696 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15697 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15698
15699 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15700 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15701 Assert(!pVCpu->iem.s.cActiveMappings);
15702 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15703}
15704
15705
15706/**
15707 * Interface for HM and EM to emulate the RDTSCP instruction.
15708 *
15709 * @returns Strict VBox status code.
15710 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15711 *
15712 * @param pVCpu The cross context virtual CPU structure.
15713 * @param cbInstr The instruction length in bytes.
15714 *
15715 * @remarks Not all of the state needs to be synced in. Recommended
15716 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15717 */
15718VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15719{
15720 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15721 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15722
15723 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15724 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15725 Assert(!pVCpu->iem.s.cActiveMappings);
15726 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15727}
15728
15729
15730/**
15731 * Interface for HM and EM to emulate the RDMSR instruction.
15732 *
15733 * @returns Strict VBox status code.
15734 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15735 *
15736 * @param pVCpu The cross context virtual CPU structure.
15737 * @param cbInstr The instruction length in bytes.
15738 *
15739 * @remarks Not all of the state needs to be synced in. Requires RCX and
15740 * (currently) all MSRs.
15741 */
15742VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15743{
15744 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15745 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15746
15747 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15748 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15749 Assert(!pVCpu->iem.s.cActiveMappings);
15750 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15751}
15752
15753
15754/**
15755 * Interface for HM and EM to emulate the WRMSR instruction.
15756 *
15757 * @returns Strict VBox status code.
15758 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15759 *
15760 * @param pVCpu The cross context virtual CPU structure.
15761 * @param cbInstr The instruction length in bytes.
15762 *
15763 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15764 * and (currently) all MSRs.
15765 */
15766VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15767{
15768 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15769 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15770 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15771
15772 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15773 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15774 Assert(!pVCpu->iem.s.cActiveMappings);
15775 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15776}
15777
15778
15779/**
15780 * Interface for HM and EM to emulate the MONITOR instruction.
15781 *
15782 * @returns Strict VBox status code.
15783 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15784 *
15785 * @param pVCpu The cross context virtual CPU structure.
15786 * @param cbInstr The instruction length in bytes.
15787 *
15788 * @remarks Not all of the state needs to be synced in.
15789 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15790 * are used.
15791 */
15792VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
15793{
15794 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15795 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15796
15797 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15798 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15799 Assert(!pVCpu->iem.s.cActiveMappings);
15800 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15801}
15802
15803
15804/**
15805 * Interface for HM and EM to emulate the MWAIT instruction.
15806 *
15807 * @returns Strict VBox status code.
15808 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15809 *
15810 * @param pVCpu The cross context virtual CPU structure.
15811 * @param cbInstr The instruction length in bytes.
15812 *
15813 * @remarks Not all of the state needs to be synced in.
15814 */
15815VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
15816{
15817 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15818 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15819
15820 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15821 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15822 Assert(!pVCpu->iem.s.cActiveMappings);
15823 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15824}
15825
15826
15827/**
15828 * Interface for HM and EM to emulate the HLT instruction.
15829 *
15830 * @returns Strict VBox status code.
15831 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15832 *
15833 * @param pVCpu The cross context virtual CPU structure.
15834 * @param cbInstr The instruction length in bytes.
15835 *
15836 * @remarks Not all of the state needs to be synced in.
15837 */
15838VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
15839{
15840 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15841
15842 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15843 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15844 Assert(!pVCpu->iem.s.cActiveMappings);
15845 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15846}
15847
15848
15849/**
15850 * Checks if IEM is in the process of delivering an event (interrupt or
15851 * exception).
15852 *
15853 * @returns true if we're in the process of raising an interrupt or exception,
15854 * false otherwise.
15855 * @param pVCpu The cross context virtual CPU structure.
15856 * @param puVector Where to store the vector associated with the
15857 * currently delivered event, optional.
15858 * @param pfFlags Where to store the event delivery flags (see
15859 * IEM_XCPT_FLAGS_XXX), optional.
15860 * @param puErr Where to store the error code associated with the
15861 * event, optional.
15862 * @param puCr2 Where to store the CR2 associated with the event,
15863 * optional.
15864 * @remarks The caller should check the flags to determine if the error code and
15865 * CR2 are valid for the event.
15866 */
15867VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15868{
15869 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15870 if (fRaisingXcpt)
15871 {
15872 if (puVector)
15873 *puVector = pVCpu->iem.s.uCurXcpt;
15874 if (pfFlags)
15875 *pfFlags = pVCpu->iem.s.fCurXcpt;
15876 if (puErr)
15877 *puErr = pVCpu->iem.s.uCurXcptErr;
15878 if (puCr2)
15879 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15880 }
15881 return fRaisingXcpt;
15882}
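
/*
 * Usage sketch (illustrative only): querying the event currently being delivered
 * and honouring the flags before trusting the error code and CR2, as recommended
 * in the remarks above.  The helper name is an assumption of the example.
 */
#if 0
static void iemExampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t uVector; uint32_t fFlags; uint32_t uErr; uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("Delivering vector %#x with error code %#x\n", uVector, uErr));
        else
            Log(("Delivering vector %#x\n", uVector));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("Associated CR2: %RX64\n", uCr2));
    }
}
#endif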
15883
15884#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15885
15886/**
15887 * Interface for HM and EM to emulate the CLGI instruction.
15888 *
15889 * @returns Strict VBox status code.
15890 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15891 * @param cbInstr The instruction length in bytes.
15892 * @thread EMT(pVCpu)
15893 */
15894VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15895{
15896 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15897
15898 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15899 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15900 Assert(!pVCpu->iem.s.cActiveMappings);
15901 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15902}
15903
15904
15905/**
15906 * Interface for HM and EM to emulate the STGI instruction.
15907 *
15908 * @returns Strict VBox status code.
15909 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15910 * @param cbInstr The instruction length in bytes.
15911 * @thread EMT(pVCpu)
15912 */
15913VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15914{
15915 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15916
15917 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15918 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15919 Assert(!pVCpu->iem.s.cActiveMappings);
15920 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15921}
15922
15923
15924/**
15925 * Interface for HM and EM to emulate the VMLOAD instruction.
15926 *
15927 * @returns Strict VBox status code.
15928 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15929 * @param cbInstr The instruction length in bytes.
15930 * @thread EMT(pVCpu)
15931 */
15932VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
15933{
15934 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15935
15936 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15937 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15938 Assert(!pVCpu->iem.s.cActiveMappings);
15939 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15940}
15941
15942
15943/**
15944 * Interface for HM and EM to emulate the VMSAVE instruction.
15945 *
15946 * @returns Strict VBox status code.
15947 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15948 * @param cbInstr The instruction length in bytes.
15949 * @thread EMT(pVCpu)
15950 */
15951VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
15952{
15953 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15954
15955 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15956 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15957 Assert(!pVCpu->iem.s.cActiveMappings);
15958 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15959}
15960
15961
15962/**
15963 * Interface for HM and EM to emulate the INVLPGA instruction.
15964 *
15965 * @returns Strict VBox status code.
15966 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15967 * @param cbInstr The instruction length in bytes.
15968 * @thread EMT(pVCpu)
15969 */
15970VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
15971{
15972 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15973
15974 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15975 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15976 Assert(!pVCpu->iem.s.cActiveMappings);
15977 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15978}
15979
15980
15981/**
15982 * Interface for HM and EM to emulate the VMRUN instruction.
15983 *
15984 * @returns Strict VBox status code.
15985 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15986 * @param cbInstr The instruction length in bytes.
15987 * @thread EMT(pVCpu)
15988 */
15989VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
15990{
15991 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15992 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15993
15994 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15995 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15996 Assert(!pVCpu->iem.s.cActiveMappings);
15997 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15998}
15999
16000
16001/**
16002 * Interface for HM and EM to emulate \#VMEXIT.
16003 *
16004 * @returns Strict VBox status code.
16005 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16006 * @param uExitCode The exit code.
16007 * @param uExitInfo1 The exit info. 1 field.
16008 * @param uExitInfo2 The exit info. 2 field.
16009 * @thread EMT(pVCpu)
16010 */
16011VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16012{
16013 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
16014 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
16015 if (pVCpu->iem.s.cActiveMappings)
16016 iemMemRollback(pVCpu);
16017 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16018}
16019
16020#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
16021
16022#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
16023
16024/**
16025 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
16026 *
16027 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
16028 * are performed. Bounds checks are done in strict builds only.
16029 *
16030 * @param pVmcs Pointer to the virtual VMCS.
16031 * @param u64VmcsField The VMCS field.
16032 * @param pu64Dst Where to store the VMCS value.
16033 *
16034 * @remarks May be called with interrupts disabled.
16035 * @todo This should probably be moved to CPUM someday.
16036 */
16037VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
16038{
16039 AssertPtr(pVmcs);
16040 AssertPtr(pu64Dst);
16041 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
16042}
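
/*
 * Usage sketch (illustrative only): reading a single field from the virtual VMCS.
 * The field-encoding constant used here (VMX_VMCS32_RO_EXIT_REASON) is assumed by
 * the example; any valid VMCS field encoding works the same way.
 */
#if 0
static uint32_t iemExampleReadVmcsExitReason(PCVMXVVMCS pVmcs)
{
    uint64_t u64Val = 0;
    IEMReadVmxVmcsField(pVmcs, VMX_VMCS32_RO_EXIT_REASON, &u64Val);
    return (uint32_t)u64Val;
}
#endif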
16043
16044
16045/**
16046 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
16047 *
16048 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
16049 * are performed. Bounds checks are done in strict builds only.
16050 *
16051 * @param pVmcs Pointer to the virtual VMCS.
16052 * @param u64VmcsField The VMCS field.
16053 * @param u64Val The value to write.
16054 *
16055 * @remarks May be called with interrupts disabled.
16056 * @todo This should probably be moved to CPUM someday.
16057 */
16058VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
16059{
16060 AssertPtr(pVmcs);
16061 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
16062}
16063
16064
16065/**
16066 * Interface for HM and EM to virtualize x2APIC MSR accesses.
16067 *
16068 * @returns Strict VBox status code.
16069 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
16070 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
16071 * the x2APIC device.
16072 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
16073 *
16074 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16075 * @param idMsr The MSR being read.
16076 * @param pu64Value Pointer to the value being written or where to store the
16077 * value being read.
16078 * @param fWrite Whether this is an MSR write or read access.
16079 * @thread EMT(pVCpu)
16080 */
16081VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
16082{
16083 Assert(pu64Value);
16084
16085 VBOXSTRICTRC rcStrict;
16086 if (fWrite)
16087 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
16088 else
16089 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
16090 Assert(!pVCpu->iem.s.cActiveMappings);
16091 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16092
16093}
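
/*
 * Usage sketch (illustrative only): how a caller might act on the status codes
 * documented above when virtualizing an x2APIC MSR read.  The helper name is an
 * assumption of the example.
 */
#if 0
static VBOXSTRICTRC iemExampleVirtX2ApicMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, pu64Value, false /*fWrite*/);
    if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
        return VINF_SUCCESS;        /* The value was taken from the virtual-APIC page. */
    if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
        return rcStrict;            /* The caller must forward the access to the x2APIC device. */
    return rcStrict;                /* Out of range or other failure: the caller raises #GP(0) or passes it up. */
}
#endif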
16094
16095
16096/**
16097 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
16098 *
16099 * @returns Strict VBox status code.
16100 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
16101 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
16102 *
16103 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16104 * @param pExitInfo Pointer to the VM-exit information.
16105 * @param pExitEventInfo Pointer to the VM-exit event information.
16106 * @thread EMT(pVCpu)
16107 */
16108VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16109{
16110 Assert(pExitInfo);
16111 Assert(pExitEventInfo);
16112 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16113 Assert(!pVCpu->iem.s.cActiveMappings);
16114 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16115
16116}
16117
16118
16119/**
16120 * Interface for HM and EM to perform an APIC-write emulation which may cause a
16121 * VM-exit.
16122 *
16123 * @returns Strict VBox status code.
16124 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16125 * @thread EMT(pVCpu)
16126 */
16127VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
16128{
16129 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
16130 Assert(!pVCpu->iem.s.cActiveMappings);
16131 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16132}
16133
16134
16135/**
16136 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
16137 *
16138 * @returns Strict VBox status code.
16139 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16140 * @thread EMT(pVCpu)
16141 */
16142VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
16143{
16144 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
16145 Assert(!pVCpu->iem.s.cActiveMappings);
16146 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16147}
16148
16149
16150/**
16151 * Interface for HM and EM to emulate VM-exit due to external interrupts.
16152 *
16153 * @returns Strict VBox status code.
16154 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16155 * @param uVector The external interrupt vector (pass 0 if the external
16156 * interrupt is still pending).
16157 * @param fIntPending Whether the external interrupt is pending or
16158 *                      acknowledged in the interrupt controller.
16159 * @thread EMT(pVCpu)
16160 */
16161VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
16162{
16163 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
16164 Assert(!pVCpu->iem.s.cActiveMappings);
16165 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16166}
16167
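/*
 * Illustrative sketch (hypothetical names): how an execution engine might
 * signal an external-interrupt VM-exit via the function above.  With
 * fIntPending=true the interrupt has not been acknowledged yet and uVector is
 * passed as 0; with fIntPending=false uVector carries the acknowledged vector.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC hmSketchReflectExtIntVmexit(PVMCPUCC pVCpu, uint8_t uVector, bool fAcknowledged)
{
    if (!fAcknowledged)
        return IEMExecVmxVmexitExtInt(pVCpu, 0 /*uVector*/, true /*fIntPending*/);
    return IEMExecVmxVmexitExtInt(pVCpu, uVector, false /*fIntPending*/);
}
#endif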
16168
16169/**
16170 * Interface for HM and EM to emulate VM-exit due to exceptions.
16171 *
16172 * Exceptions include NMIs, software exceptions (those generated by INT3 or
16173 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
16174 *
16175 * @returns Strict VBox status code.
16176 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16177 * @param pExitInfo Pointer to the VM-exit information.
16178 * @param pExitEventInfo Pointer to the VM-exit event information.
16179 * @thread EMT(pVCpu)
16180 */
16181VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16182{
16183 Assert(pExitInfo);
16184 Assert(pExitEventInfo);
16185 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16186 Assert(!pVCpu->iem.s.cActiveMappings);
16187 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16188}
16189
16190
16191/**
16192 * Interface for HM and EM to emulate VM-exit due to NMIs.
16193 *
16194 * @returns Strict VBox status code.
16195 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16196 * @thread EMT(pVCpu)
16197 */
16198VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
16199{
16200 VMXVEXITINFO ExitInfo;
16201 RT_ZERO(ExitInfo);
16202 ExitInfo.uReason = VMX_EXIT_XCPT_OR_NMI;
16203
16204 VMXVEXITEVENTINFO ExitEventInfo;
16205 RT_ZERO(ExitEventInfo);
16206 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
16207 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
16208 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
16209
16210 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
16211 Assert(!pVCpu->iem.s.cActiveMappings);
16212 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16213}
16214
16215
16216/**
16217 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
16218 *
16219 * @returns Strict VBox status code.
16220 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16221 * @thread EMT(pVCpu)
16222 */
16223VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
16224{
16225 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
16226 Assert(!pVCpu->iem.s.cActiveMappings);
16227 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16228}
16229
16230
16231/**
16232 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
16233 *
16234 * @returns Strict VBox status code.
16235 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16236 * @param uVector The SIPI vector.
16237 * @thread EMT(pVCpu)
16238 */
16239VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
16240{
16241 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
16242 Assert(!pVCpu->iem.s.cActiveMappings);
16243 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16244}
16245
16246
16247/**
16248 * Interface for HM and EM to emulate a VM-exit.
16249 *
16250 * If a specialized version of a VM-exit handler exists, it must be used instead.
16251 *
16252 * @returns Strict VBox status code.
16253 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16254 * @param uExitReason The VM-exit reason.
16255 * @param u64ExitQual The Exit qualification.
16256 * @thread EMT(pVCpu)
16257 */
16258VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
16259{
16260 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
16261 Assert(!pVCpu->iem.s.cActiveMappings);
16262 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16263}
16264
16265
16266/**
16267 * Interface for HM and EM to emulate a VM-exit due to an instruction.
16268 *
16269 * This is meant to be used for those instructions for which VMX provides
16270 * additional decoding information beyond just the instruction length!
16271 *
16272 * @returns Strict VBox status code.
16273 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16274 * @param pExitInfo Pointer to the VM-exit information.
16275 * @thread EMT(pVCpu)
16276 */
16277VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16278{
16279 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
16280 Assert(!pVCpu->iem.s.cActiveMappings);
16281 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16282}
16283
16284
16285/**
16286 * Interface for HM and EM to emulate a VM-exit due to an instruction.
16287 *
16288 * This is meant to be used for those instructions for which VMX provides only
16289 * the instruction length.
16290 *
16291 * @returns Strict VBox status code.
16292 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16293 * @param   uExitReason The VM-exit reason.
16294 * @param cbInstr The instruction length in bytes.
16295 * @thread EMT(pVCpu)
16296 */
16297VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
16298{
16299 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
16300 Assert(!pVCpu->iem.s.cActiveMappings);
16301 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16302}
16303
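/*
 * Illustrative sketch: emulating a VM-exit for an instruction that provides
 * nothing beyond its length, using CPUID (2 bytes) purely as an example.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC hmSketchCpuidVmexit(PVMCPUCC pVCpu)
{
    return IEMExecVmxVmexitInstr(pVCpu, VMX_EXIT_CPUID, 2 /*cbInstr*/);
}
#endif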
16304
16305/**
16306 * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
16307 * virtualized EOI, TPR below threshold).
16308 *
16309 * @returns Strict VBox status code.
16310 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16311 * @param pExitInfo Pointer to the VM-exit information.
16312 * @thread EMT(pVCpu)
16313 */
16314VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16315{
16316 Assert(pExitInfo);
16317 VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
16318 Assert(!pVCpu->iem.s.cActiveMappings);
16319 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16320}
16321
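/*
 * Illustrative sketch: forwarding a TPR-below-threshold trap-like VM-exit,
 * mirroring the VMXVEXITINFO construction used elsewhere in this file.  Any
 * additional exit qualification bits are assumed to be filled in by the caller.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC hmSketchTprBelowThresholdVmexit(PVMCPUCC pVCpu)
{
    VMXVEXITINFO ExitInfo;
    RT_ZERO(ExitInfo);
    ExitInfo.uReason = VMX_EXIT_TPR_BELOW_THRESHOLD;
    return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
}
#endif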
16322
16323/**
16324 * Interface for HM and EM to emulate a VM-exit due to a task switch.
16325 *
16326 * @returns Strict VBox status code.
16327 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16328 * @param pExitInfo Pointer to the VM-exit information.
16329 * @param pExitEventInfo Pointer to the VM-exit event information.
16330 * @thread EMT(pVCpu)
16331 */
16332VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16333{
16334 Assert(pExitInfo);
16335 Assert(pExitEventInfo);
16336 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
16337 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16338 Assert(!pVCpu->iem.s.cActiveMappings);
16339 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16340}
16341
16342
16343/**
16344 * Interface for HM and EM to emulate the VMREAD instruction.
16345 *
16346 * @returns Strict VBox status code.
16347 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16348 * @param pExitInfo Pointer to the VM-exit information.
16349 * @thread EMT(pVCpu)
16350 */
16351VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16352{
16353    Assert(pExitInfo);
16354    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16355    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16356
16357 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16358
16359 VBOXSTRICTRC rcStrict;
16360 uint8_t const cbInstr = pExitInfo->cbInstr;
16361 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16362 uint64_t const u64FieldEnc = fIs64BitMode
16363 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16364 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16365 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16366 {
16367 if (fIs64BitMode)
16368 {
16369 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16370 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16371 }
16372 else
16373 {
16374 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16375 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16376 }
16377 }
16378 else
16379 {
16380 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16381 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16382 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16383 }
16384 Assert(!pVCpu->iem.s.cActiveMappings);
16385 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16386}
16387
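/*
 * Illustrative sketch: packaging an already decoded register-form VMREAD for
 * the interface above.  The field names follow the code above; the register
 * indexes and instruction length are arbitrary example values.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC hmSketchDecodedVmread(PVMCPUCC pVCpu)
{
    VMXVEXITINFO ExitInfo;
    RT_ZERO(ExitInfo);
    ExitInfo.uReason                               = VMX_EXIT_VMREAD;
    ExitInfo.cbInstr                               = 4;    /* example length */
    ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand = 1;
    ExitInfo.InstrInfo.VmreadVmwrite.iReg1         = 0;    /* destination register (e.g. rAX) */
    ExitInfo.InstrInfo.VmreadVmwrite.iReg2         = 3;    /* register holding the VMCS field encoding (e.g. rBX) */
    return IEMExecDecodedVmread(pVCpu, &ExitInfo);
}
#endif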
16388
16389/**
16390 * Interface for HM and EM to emulate the VMWRITE instruction.
16391 *
16392 * @returns Strict VBox status code.
16393 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16394 * @param pExitInfo Pointer to the VM-exit information.
16395 * @thread EMT(pVCpu)
16396 */
16397VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16398{
16399    Assert(pExitInfo);
16400    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16401    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16402
16403 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16404
16405 uint64_t u64Val;
16406 uint8_t iEffSeg;
16407 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16408 {
16409 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16410 iEffSeg = UINT8_MAX;
16411 }
16412 else
16413 {
16414 u64Val = pExitInfo->GCPtrEffAddr;
16415 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16416 }
16417 uint8_t const cbInstr = pExitInfo->cbInstr;
16418 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16419 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16420 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16421 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16422 Assert(!pVCpu->iem.s.cActiveMappings);
16423 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16424}
16425
16426
16427/**
16428 * Interface for HM and EM to emulate the VMPTRLD instruction.
16429 *
16430 * @returns Strict VBox status code.
16431 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16432 * @param pExitInfo Pointer to the VM-exit information.
16433 * @thread EMT(pVCpu)
16434 */
16435VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16436{
16437 Assert(pExitInfo);
16438 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16439 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16440
16441 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16442
16443 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16444 uint8_t const cbInstr = pExitInfo->cbInstr;
16445 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16446 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16447 Assert(!pVCpu->iem.s.cActiveMappings);
16448 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16449}
16450
16451
16452/**
16453 * Interface for HM and EM to emulate the VMPTRST instruction.
16454 *
16455 * @returns Strict VBox status code.
16456 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16457 * @param pExitInfo Pointer to the VM-exit information.
16458 * @thread EMT(pVCpu)
16459 */
16460VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16461{
16462 Assert(pExitInfo);
16463 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16464 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16465
16466 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16467
16468 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16469 uint8_t const cbInstr = pExitInfo->cbInstr;
16470 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16471 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16472 Assert(!pVCpu->iem.s.cActiveMappings);
16473 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16474}
16475
16476
16477/**
16478 * Interface for HM and EM to emulate the VMCLEAR instruction.
16479 *
16480 * @returns Strict VBox status code.
16481 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16482 * @param pExitInfo Pointer to the VM-exit information.
16483 * @thread EMT(pVCpu)
16484 */
16485VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16486{
16487 Assert(pExitInfo);
16488 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16489 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16490
16491 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16492
16493 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16494 uint8_t const cbInstr = pExitInfo->cbInstr;
16495 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16496 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16497 Assert(!pVCpu->iem.s.cActiveMappings);
16498 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16499}
16500
16501
16502/**
16503 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16504 *
16505 * @returns Strict VBox status code.
16506 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16507 * @param cbInstr The instruction length in bytes.
16508 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16509 * VMXINSTRID_VMRESUME).
16510 * @thread EMT(pVCpu)
16511 */
16512VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16513{
16514 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16515 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16516
16517 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16518 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16519 Assert(!pVCpu->iem.s.cActiveMappings);
16520 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16521}
16522
16523
16524/**
16525 * Interface for HM and EM to emulate the VMXON instruction.
16526 *
16527 * @returns Strict VBox status code.
16528 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16529 * @param pExitInfo Pointer to the VM-exit information.
16530 * @thread EMT(pVCpu)
16531 */
16532VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16533{
16534 Assert(pExitInfo);
16535 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16536 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16537
16538 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16539
16540 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16541 uint8_t const cbInstr = pExitInfo->cbInstr;
16542 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16543 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16544 Assert(!pVCpu->iem.s.cActiveMappings);
16545 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16546}
16547
16548
16549/**
16550 * Interface for HM and EM to emulate the VMXOFF instruction.
16551 *
16552 * @returns Strict VBox status code.
16553 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16554 * @param cbInstr The instruction length in bytes.
16555 * @thread EMT(pVCpu)
16556 */
16557VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
16558{
16559 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16560 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16561
16562 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16563 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16564 Assert(!pVCpu->iem.s.cActiveMappings);
16565 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16566}
16567
16568
16569/**
16570 * Interface for HM and EM to emulate the INVVPID instruction.
16571 *
16572 * @returns Strict VBox status code.
16573 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16574 * @param pExitInfo Pointer to the VM-exit information.
16575 * @thread EMT(pVCpu)
16576 */
16577VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16578{
16579    Assert(pExitInfo);
16580    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16581    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16582
16583 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16584
16585 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16586 uint8_t const cbInstr = pExitInfo->cbInstr;
16587 RTGCPTR const GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
16588 uint64_t const u64InvvpidType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16589 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16590 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16591 VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
16592 Assert(!pVCpu->iem.s.cActiveMappings);
16593 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16594}
16595
16596
16597# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
16598/**
16599 * Interface for HM and EM to emulate the INVEPT instruction.
16600 *
16601 * @returns Strict VBox status code.
16602 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16603 * @param pExitInfo Pointer to the VM-exit information.
16604 * @thread EMT(pVCpu)
16605 */
16606VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvept(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16607{
16608    Assert(pExitInfo);
16609    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16610    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16611
16612 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16613
16614 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16615 uint8_t const cbInstr = pExitInfo->cbInstr;
16616 RTGCPTR const GCPtrInveptDesc = pExitInfo->GCPtrEffAddr;
16617 uint64_t const u64InveptType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16618 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16619 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16620 VBOXSTRICTRC rcStrict = iemVmxInvept(pVCpu, cbInstr, iEffSeg, GCPtrInveptDesc, u64InveptType, pExitInfo);
16621 Assert(!pVCpu->iem.s.cActiveMappings);
16622 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16623}
16624
16625
16626/**
16627 * Interface for HM and EM to emulate a VM-exit due to an EPT violation.
16628 *
16629 * @returns Strict VBox status code.
16630 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16631 * @param pExitInfo Pointer to the VM-exit information.
16632 * @param pExitEventInfo Pointer to the VM-exit event information.
16633 * @thread EMT(pVCpu)
16634 */
16635VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo,
16636 PCVMXVEXITEVENTINFO pExitEventInfo)
16637{
16638 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16639
16640 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16641 VBOXSTRICTRC rcStrict = iemVmxVmexitEptViolationWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16642 Assert(!pVCpu->iem.s.cActiveMappings);
16643 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16644}
16645
16646
16647/**
16648 * Interface for HM and EM to emulate a VM-exit due to an EPT misconfiguration.
16649 *
16650 * @returns Strict VBox status code.
16651 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16652 * @param GCPhysAddr The nested-guest physical address causing the EPT
16653 * misconfiguration.
16654 * @param pExitEventInfo Pointer to the VM-exit event information.
16655 * @thread EMT(pVCpu)
16656 */
16657VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo)
16658{
16659 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16660
16661 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16662 VBOXSTRICTRC rcStrict = iemVmxVmexitEptMisconfigWithInfo(pVCpu, GCPhysAddr, pExitEventInfo);
16663 Assert(!pVCpu->iem.s.cActiveMappings);
16664 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16665}
16666
16667# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
16668
16669
16670/**
16671 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16672 *
16673 * @remarks The @a uUser argument is currently unused.
16674 */
16675PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16676 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16677 PGMACCESSORIGIN enmOrigin, uint64_t uUser)
16678{
16679 RT_NOREF3(pvPhys, enmOrigin, uUser);
16680
16681 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
16682 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16683 {
16684 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16685 Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16686
16687 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_DATA_W : IEM_ACCESS_DATA_R;
16688 uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
16689 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16690 if (RT_FAILURE(rcStrict))
16691 return rcStrict;
16692
16693        /* Any access on this APIC-access page has been handled; the caller should not carry out the access. */
16694 return VINF_SUCCESS;
16695 }
16696
16697 LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase));
16698 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16699 if (RT_FAILURE(rc))
16700 return rc;
16701
16702 /* Instruct the caller of this handler to perform the read/write as normal memory. */
16703 return VINF_PGM_HANDLER_DO_DEFAULT;
16704}
16705
16706
16707# ifndef IN_RING3
16708/**
16709 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
16710 * \#PF access handler callback for guest VMX APIC-access page.}
16711 */
16712DECLCALLBACK(VBOXSTRICTRC) iemVmxApicAccessPagePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame,
16713 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
16714
16715{
16716 RT_NOREF4(pVM, pRegFrame, pvFault, uUser);
16717
16718 /*
16719 * Handle the VMX APIC-access page only when the guest is in VMX non-root mode.
16720 * Otherwise we must deregister the page and allow regular RAM access.
16721 * Failing to do so lands us with endless EPT misconfiguration VM-exits.
16722 */
16723 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
16724 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16725 {
16726 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16727 Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16728
16729 /*
16730 * Check if the access causes an APIC-access VM-exit.
16731 */
16732 uint32_t fAccess;
16733 if (uErr & X86_TRAP_PF_ID)
16734 fAccess = IEM_ACCESS_INSTRUCTION;
16735 else if (uErr & X86_TRAP_PF_RW)
16736 fAccess = IEM_ACCESS_DATA_W;
16737 else
16738 fAccess = IEM_ACCESS_DATA_R;
16739
16740 uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
16741 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, 0 /* cbAccess */, fAccess);
16742 if (fIntercept)
16743 {
16744 /*
16745 * Query the source VM-exit (from the execution engine) that caused this access
16746 * within the APIC-access page. Currently only HM is supported.
16747 */
16748 AssertMsgReturn(VM_IS_HM_ENABLED(pVM),
16749 ("VM-exit auxiliary info. fetching not supported for execution engine %d\n",
16750 pVM->bMainExecutionEngine), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
16751 HMEXITAUX HmExitAux;
16752 RT_ZERO(HmExitAux);
16753 int const rc = HMR0GetExitAuxInfo(pVCpu, &HmExitAux, HMVMX_READ_EXIT_INSTR_LEN
16754 | HMVMX_READ_EXIT_QUALIFICATION
16755 | HMVMX_READ_IDT_VECTORING_INFO
16756 | HMVMX_READ_IDT_VECTORING_ERROR_CODE);
16757 AssertRCReturn(rc, rc);
16758
16759 /*
16760             * Verify that the VM-exit reason is an EPT violation.
16761 * Other accesses should go through the other handler (iemVmxApicAccessPageHandler).
16762 */
16763 AssertLogRelMsgReturn(HmExitAux.Vmx.uReason == VMX_EXIT_EPT_VIOLATION,
16764 ("Unexpected call to the VMX APIC-access page #PF handler for %#RGp (off=%u) uReason=%#RX32\n",
16765 GCPhysAccessBase, offAccess, HmExitAux.Vmx.uReason), VERR_IEM_IPE_9);
16766
16767 /*
16768 * Construct the virtual APIC-access VM-exit.
16769 */
16770 VMXAPICACCESS enmAccess;
16771 if (HmExitAux.Vmx.u64Qual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID)
16772 {
16773 if (VMX_IDT_VECTORING_INFO_IS_VALID(HmExitAux.Vmx.uIdtVectoringInfo))
16774 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
16775 else if (fAccess == IEM_ACCESS_INSTRUCTION)
16776 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
16777 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
16778 enmAccess = VMXAPICACCESS_LINEAR_WRITE;
16779 else
16780 enmAccess = VMXAPICACCESS_LINEAR_READ;
16781 }
16782 else
16783 {
16784 if (VMX_IDT_VECTORING_INFO_IS_VALID(HmExitAux.Vmx.uIdtVectoringInfo))
16785 enmAccess = VMXAPICACCESS_PHYSICAL_EVENT_DELIVERY;
16786 else
16787 {
16788 /** @todo How to distinguish between monitoring/trace vs other instructions
16789 * here? */
16790 enmAccess = VMXAPICACCESS_PHYSICAL_INSTR;
16791 }
16792 }
16793
16794 VMXVEXITINFO ExitInfo;
16795 RT_ZERO(ExitInfo);
16796 ExitInfo.uReason = VMX_EXIT_APIC_ACCESS;
16797 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
16798 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess);
16799 ExitInfo.cbInstr = HmExitAux.Vmx.cbInstr;
16800
16801 VMXVEXITEVENTINFO ExitEventInfo;
16802 RT_ZERO(ExitEventInfo);
16803 ExitEventInfo.uIdtVectoringInfo = HmExitAux.Vmx.uIdtVectoringInfo;
16804 ExitEventInfo.uIdtVectoringErrCode = HmExitAux.Vmx.uIdtVectoringErrCode;
16805
16806 /*
16807 * Raise the APIC-access VM-exit.
16808 */
16809 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
16810 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16811 }
16812
16813 /*
16814 * The access isn't intercepted, which means it needs to be virtualized.
16815 *
16816 * This requires emulating the instruction because we need the bytes being
16817         * read/written by the instruction, not just the offset being accessed within
16818         * the APIC-access page (which we derive from the faulting address).
16819 */
16820 return VINF_EM_RAW_EMULATE_INSTR;
16821 }
16822
16823 LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase));
16824 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16825 if (RT_FAILURE(rc))
16826 return rc;
16827
16828 return VINF_SUCCESS;
16829}
16830# endif /* !IN_RING3 */
16831#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16832
16833
16834#ifdef IN_RING3
16835
16836/**
16837 * Handles the unlikely and probably fatal merge cases.
16838 *
16839 * @returns Merged status code.
16840 * @param rcStrict Current EM status code.
16841 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16842 * with @a rcStrict.
16843 * @param iMemMap The memory mapping index. For error reporting only.
16844 * @param pVCpu The cross context virtual CPU structure of the calling
16845 * thread, for error reporting only.
16846 */
16847DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16848 unsigned iMemMap, PVMCPUCC pVCpu)
16849{
16850 if (RT_FAILURE_NP(rcStrict))
16851 return rcStrict;
16852
16853 if (RT_FAILURE_NP(rcStrictCommit))
16854 return rcStrictCommit;
16855
16856 if (rcStrict == rcStrictCommit)
16857 return rcStrictCommit;
16858
16859 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16860 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16861 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16864 return VERR_IOM_FF_STATUS_IPE;
16865}
16866
16867
16868/**
16869 * Helper for IOMR3ProcessForceFlag.
16870 *
16871 * @returns Merged status code.
16872 * @param rcStrict Current EM status code.
16873 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16874 * with @a rcStrict.
16875 * @param iMemMap The memory mapping index. For error reporting only.
16876 * @param pVCpu The cross context virtual CPU structure of the calling
16877 * thread, for error reporting only.
16878 */
16879DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
16880{
16881 /* Simple. */
16882 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16883 return rcStrictCommit;
16884
16885 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16886 return rcStrict;
16887
16888 /* EM scheduling status codes. */
16889 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16890 && rcStrict <= VINF_EM_LAST))
16891 {
16892 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16893 && rcStrictCommit <= VINF_EM_LAST))
16894 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16895 }
16896
16897 /* Unlikely */
16898 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16899}
16900
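/*
 * Illustrative examples of the merge policy above; the iMemMap and pVCpu
 * arguments are only consulted on the slow error-reporting path.
 */
#if 0 /* illustrative sketch, not built */
static void iemSketchMergeStatusExamples(PVMCPUCC pVCpu)
{
    /* rcStrict is VINF_SUCCESS (or VINF_EM_RAW_TO_R3): the commit status wins. */
    VBOXSTRICTRC rc1 = iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RAW_TO_R3, 0 /*iMemMap*/, pVCpu);
    /* A successful commit keeps whatever status the caller already had. */
    VBOXSTRICTRC rc2 = iemR3MergeStatus(VINF_EM_HALT, VINF_SUCCESS, 0 /*iMemMap*/, pVCpu);
    RT_NOREF2(rc1, rc2);
}
#endif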
16901
16902/**
16903 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16904 *
16905 * @returns Merge between @a rcStrict and what the commit operation returned.
16906 * @param pVM The cross context VM structure.
16907 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16908 * @param rcStrict The status code returned by ring-0 or raw-mode.
16909 */
16910VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
16911{
16912 /*
16913 * Reset the pending commit.
16914 */
16915 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16916 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16917 ("%#x %#x %#x\n",
16918 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16919 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16920
16921 /*
16922 * Commit the pending bounce buffers (usually just one).
16923 */
16924 unsigned cBufs = 0;
16925 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16926 while (iMemMap-- > 0)
16927 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16928 {
16929 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16930 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16931 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16932
16933 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16934 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16935 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16936
16937 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16938 {
16939 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16940 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16941 pbBuf,
16942 cbFirst,
16943 PGMACCESSORIGIN_IEM);
16944 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16945 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16946 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16947 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16948 }
16949
16950 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16951 {
16952 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16953 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16954 pbBuf + cbFirst,
16955 cbSecond,
16956 PGMACCESSORIGIN_IEM);
16957 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16958 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16959 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16960 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16961 }
16962 cBufs++;
16963 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16964 }
16965
16966 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16967 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16968 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16969 pVCpu->iem.s.cActiveMappings = 0;
16970 return rcStrict;
16971}
16972
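/*
 * Illustrative sketch (hypothetical emSketchProcessIemForceFlag): the shape of
 * the ring-3 caller side, letting IEM commit its pending bounce-buffer writes
 * when VMCPU_FF_IEM is set and merging the resulting status codes.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC emSketchProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif
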
16973#endif /* IN_RING3 */
16974