VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 74661

Last change on this file since 74661 was 74661, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 VM-exit bits; Added IN/OUT intercepts.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 628.9 KB
Line 
1/* $Id: IEMAll.cpp 74661 2018-10-08 09:46:26Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with the speed goal, as the disassembler chews things a bit too much
37 * while leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
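/*
 * Illustrative sketch (added commentary, not part of the original source): how
 * the level conventions above map onto the IPRT logging macros from VBox/log.h
 * under this file's LOG_GROUP.  The double parentheses are needed because the
 * macros forward a complete printf-style argument list.  The function names and
 * variables in the example are hypothetical.
 *
 * @code
 *      LogFlow(("iemExecOneWorker: enter, rip=%RX64\n", pVCpu->cpum.GstCtx.rip)); // Flow   : enter/exit state info
 *      Log(("iemRaiseXcptOrInt: vector=%#x\n", u8Vector));                        // Level 1: exceptions and such
 *      Log4(("decode - %04x:%08RX64 %s\n", uCsSel, uRip, pszMnemonic));           // Level 4: mnemonics w/ EIP
 * @endcode
 */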
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211/**
212 * CPU exception classes.
213 */
214typedef enum IEMXCPTCLASS
215{
216 IEMXCPTCLASS_BENIGN,
217 IEMXCPTCLASS_CONTRIBUTORY,
218 IEMXCPTCLASS_PAGE_FAULT,
219 IEMXCPTCLASS_DOUBLE_FAULT
220} IEMXCPTCLASS;
221
222
223/*********************************************************************************************************************************
224* Defined Constants And Macros *
225*********************************************************************************************************************************/
226/** @def IEM_WITH_SETJMP
227 * Enables alternative status code handling using setjmps.
228 *
229 * This adds a bit of expense via the setjmp() call since it saves all the
230 * non-volatile registers. However, it eliminates return code checks and allows
231 * for more optimal return value passing (return regs instead of stack buffer).
232 */
233#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
234# define IEM_WITH_SETJMP
235#endif
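/*
 * Illustrative sketch (added commentary, not from the original source): the two
 * code shapes this switch selects between.  Without IEM_WITH_SETJMP every
 * memory/opcode helper returns a VBOXSTRICTRC that the caller must check; with
 * it, the *Jmp helper variants (cf. iemRaisePageFaultJmp further down) longjmp
 * out of the instruction on failure, so the fetched value can be used directly.
 * The Jmp-variant name below is assumed for illustration.
 *
 * @code
 *      // Status-code style:
 *      uint32_t     u32;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32, iSegReg, GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      // Setjmp style - a failure longjmps to the setjmp frame and never returns here:
 *      uint32_t u32 = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 * @endcode
 */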
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
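/*
 * Usage sketch (added commentary, illustrative only) for the two macros above:
 * they supply the default case of a switch whose controlling value is known to
 * be exhaustive, silencing GCC's 'may be used uninitialized' warnings without
 * hiding real bugs.  cbValue is a hypothetical local.
 *
 * @code
 *      switch (pVCpu->iem.s.enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: cbValue = 2; break;
 *          case IEMMODE_32BIT: cbValue = 4; break;
 *          case IEMMODE_64BIT: cbValue = 8; break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 * @endcode
 */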
243
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_1.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
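/*
 * Illustrative sketch (added commentary): how the FNIEMOP_DEF and FNIEMOP_CALL
 * macro families fit together.  The opcode function, the common worker it
 * forwards to, and the opcode-fetch macro are placeholders for illustration.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_example)                          // expands to the calling-convention
 *      {                                                   // specific signature selected above
 *          uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);      // fetch the ModR/M byte
 *          return FNIEMOP_CALL_1(iemOpCommonExample, bRm); // i.e. (a_pfn)(pVCpu, bRm)
 *      }
 * @endcode
 */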
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in a 64-bit code segment.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Check if we're currently executing in real mode.
335 *
336 * @returns @c true if it is, @c false if not.
337 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
338 */
339#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
340
341/**
342 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
343 * @returns PCCPUMFEATURES
344 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
345 */
346#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
347
348/**
349 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
350 * @returns PCCPUMFEATURES
351 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
352 */
353#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
354
355/**
356 * Evaluates to true if we're presenting an Intel CPU to the guest.
357 */
358#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
359
360/**
361 * Evaluates to true if we're presenting an AMD CPU to the guest.
362 */
363#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
364
365/**
366 * Check if the address is canonical.
367 */
368#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
369
370/**
371 * Gets the effective VEX.VVVV value.
372 *
373 * The 4th bit is ignored if not 64-bit code.
374 * @returns effective V-register value.
375 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
376 */
377#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
378 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
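/* Worked example (added commentary) of the masking above: a decoded VEX.vvvv
 * value of 14 selects the 14th register in 64-bit mode, while outside 64-bit
 * code the 4th bit is dropped and the same encoding selects register 6 (14 & 7). */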
379
380/** @def IEM_USE_UNALIGNED_DATA_ACCESS
381 * Use unaligned accesses instead of elaborate byte assembly. */
382#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
383# define IEM_USE_UNALIGNED_DATA_ACCESS
384#endif
385
386#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
387
388/**
389 * Check if the guest has entered VMX root operation.
390 */
391# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
392
393/**
394 * Check if the guest has entered VMX non-root operation.
395 */
396# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
397
398/**
399 * Invokes the VMX VM-exit handler for an instruction intercept.
400 */
401# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
402 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
403
404/**
405 * Invokes the VMX VM-exit handler for an instruction intercept where the
406 * instruction provides additional VM-exit information.
407 */
408# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
409 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
410
411/**
412 * Check if the nested-guest has the given Pin-based VM-execution control set.
413 */
414# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
415 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
416
417/**
418 * Check if the nested-guest has the given Processor-based VM-execution control set.
419 */
420#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
421 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
422
423/**
424 * Check if the nested-guest has the given Secondary Processor-based VM-execution
425 * control set.
426 */
427#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
428 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
429
430#else
431# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
432# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
433# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
434# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
435# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
436# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
437# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
438
439#endif
440
441#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
442/**
443 * Check if an SVM control/instruction intercept is set.
444 */
445# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
446 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
447
448/**
449 * Check if an SVM read CRx intercept is set.
450 */
451# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
452 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
453
454/**
455 * Check if an SVM write CRx intercept is set.
456 */
457# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
458 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
459
460/**
461 * Check if an SVM read DRx intercept is set.
462 */
463# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
464 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
465
466/**
467 * Check if an SVM write DRx intercept is set.
468 */
469# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
470 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
471
472/**
473 * Check if an SVM exception intercept is set.
474 */
475# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
476 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
477
478/**
479 * Invokes the SVM \#VMEXIT handler for the nested-guest.
480 */
481# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
482 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
483
484/**
485 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
486 * corresponding decode assist information.
487 */
488# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
489 do \
490 { \
491 uint64_t uExitInfo1; \
492 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
493 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
494 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
495 else \
496 uExitInfo1 = 0; \
497 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
498 } while (0)
499
500/** Checks and handles the SVM nested-guest instruction intercept and updates
501 * the NRIP if needed.
502 */
503# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
504 do \
505 { \
506 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
507 { \
508 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
509 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
510 } \
511 } while (0)
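/*
 * Usage sketch (added commentary): a typical instruction emulation checks the
 * control intercept before doing the real work, here using RDTSC as the
 * example.  The intercept and exit-code constant names are assumed from the
 * SVM headers for illustration.
 *
 * @code
 *      IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC,
 *                                    0, 0);   // no extra exit info
 *      // ... not intercepted: emulate RDTSC normally ...
 * @endcode
 */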
512
513/** Checks and handles SVM nested-guest CR0 read intercept. */
514# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
515 do \
516 { \
517 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
518 { /* probably likely */ } \
519 else \
520 { \
521 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
522 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
523 } \
524 } while (0)
525
526/**
527 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
528 */
529# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
530 do { \
531 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
532 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
533 } while (0)
534
535#else
536# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
537# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
538# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
539# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
540# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
541# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
542# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
543# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
544# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
545# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
546# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
547
548#endif
549
550
551/*********************************************************************************************************************************
552* Global Variables *
553*********************************************************************************************************************************/
554extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
555
556
557/** Function table for the ADD instruction. */
558IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
559{
560 iemAImpl_add_u8, iemAImpl_add_u8_locked,
561 iemAImpl_add_u16, iemAImpl_add_u16_locked,
562 iemAImpl_add_u32, iemAImpl_add_u32_locked,
563 iemAImpl_add_u64, iemAImpl_add_u64_locked
564};
565
566/** Function table for the ADC instruction. */
567IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
568{
569 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
570 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
571 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
572 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
573};
574
575/** Function table for the SUB instruction. */
576IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
577{
578 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
579 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
580 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
581 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
582};
583
584/** Function table for the SBB instruction. */
585IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
586{
587 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
588 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
589 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
590 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
591};
592
593/** Function table for the OR instruction. */
594IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
595{
596 iemAImpl_or_u8, iemAImpl_or_u8_locked,
597 iemAImpl_or_u16, iemAImpl_or_u16_locked,
598 iemAImpl_or_u32, iemAImpl_or_u32_locked,
599 iemAImpl_or_u64, iemAImpl_or_u64_locked
600};
601
602/** Function table for the XOR instruction. */
603IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
604{
605 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
606 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
607 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
608 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
609};
610
611/** Function table for the AND instruction. */
612IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
613{
614 iemAImpl_and_u8, iemAImpl_and_u8_locked,
615 iemAImpl_and_u16, iemAImpl_and_u16_locked,
616 iemAImpl_and_u32, iemAImpl_and_u32_locked,
617 iemAImpl_and_u64, iemAImpl_and_u64_locked
618};
619
620/** Function table for the CMP instruction.
621 * @remarks Making operand order ASSUMPTIONS.
622 */
623IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
624{
625 iemAImpl_cmp_u8, NULL,
626 iemAImpl_cmp_u16, NULL,
627 iemAImpl_cmp_u32, NULL,
628 iemAImpl_cmp_u64, NULL
629};
630
631/** Function table for the TEST instruction.
632 * @remarks Making operand order ASSUMPTIONS.
633 */
634IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
635{
636 iemAImpl_test_u8, NULL,
637 iemAImpl_test_u16, NULL,
638 iemAImpl_test_u32, NULL,
639 iemAImpl_test_u64, NULL
640};
641
642/** Function table for the BT instruction. */
643IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
644{
645 NULL, NULL,
646 iemAImpl_bt_u16, NULL,
647 iemAImpl_bt_u32, NULL,
648 iemAImpl_bt_u64, NULL
649};
650
651/** Function table for the BTC instruction. */
652IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
653{
654 NULL, NULL,
655 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
656 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
657 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
658};
659
660/** Function table for the BTR instruction. */
661IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
662{
663 NULL, NULL,
664 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
665 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
666 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
667};
668
669/** Function table for the BTS instruction. */
670IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
671{
672 NULL, NULL,
673 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
674 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
675 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
676};
677
678/** Function table for the BSF instruction. */
679IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
680{
681 NULL, NULL,
682 iemAImpl_bsf_u16, NULL,
683 iemAImpl_bsf_u32, NULL,
684 iemAImpl_bsf_u64, NULL
685};
686
687/** Function table for the BSR instruction. */
688IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
689{
690 NULL, NULL,
691 iemAImpl_bsr_u16, NULL,
692 iemAImpl_bsr_u32, NULL,
693 iemAImpl_bsr_u64, NULL
694};
695
696/** Function table for the IMUL instruction. */
697IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
698{
699 NULL, NULL,
700 iemAImpl_imul_two_u16, NULL,
701 iemAImpl_imul_two_u32, NULL,
702 iemAImpl_imul_two_u64, NULL
703};
704
705/** Group 1 /r lookup table. */
706IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
707{
708 &g_iemAImpl_add,
709 &g_iemAImpl_or,
710 &g_iemAImpl_adc,
711 &g_iemAImpl_sbb,
712 &g_iemAImpl_and,
713 &g_iemAImpl_sub,
714 &g_iemAImpl_xor,
715 &g_iemAImpl_cmp
716};
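/*
 * Dispatch sketch (added commentary): the table above mirrors the ModR/M reg
 * field of the 0x80..0x83 "group 1" opcodes, so a decoder picks the binary-op
 * implementation from the reg bits and then selects the worker of the right
 * operand size (and the locked variant when a LOCK prefix is present).  The
 * snippet below is a simplified placeholder, not the actual decoder.
 *
 * @code
 *      PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 *      // e.g. pImpl == &g_iemAImpl_add for /0, &g_iemAImpl_cmp for /7
 * @endcode
 */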
717
718/** Function table for the INC instruction. */
719IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
720{
721 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
722 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
723 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
724 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
725};
726
727/** Function table for the DEC instruction. */
728IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
729{
730 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
731 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
732 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
733 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
734};
735
736/** Function table for the NEG instruction. */
737IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
738{
739 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
740 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
741 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
742 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
743};
744
745/** Function table for the NOT instruction. */
746IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
747{
748 iemAImpl_not_u8, iemAImpl_not_u8_locked,
749 iemAImpl_not_u16, iemAImpl_not_u16_locked,
750 iemAImpl_not_u32, iemAImpl_not_u32_locked,
751 iemAImpl_not_u64, iemAImpl_not_u64_locked
752};
753
754
755/** Function table for the ROL instruction. */
756IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
757{
758 iemAImpl_rol_u8,
759 iemAImpl_rol_u16,
760 iemAImpl_rol_u32,
761 iemAImpl_rol_u64
762};
763
764/** Function table for the ROR instruction. */
765IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
766{
767 iemAImpl_ror_u8,
768 iemAImpl_ror_u16,
769 iemAImpl_ror_u32,
770 iemAImpl_ror_u64
771};
772
773/** Function table for the RCL instruction. */
774IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
775{
776 iemAImpl_rcl_u8,
777 iemAImpl_rcl_u16,
778 iemAImpl_rcl_u32,
779 iemAImpl_rcl_u64
780};
781
782/** Function table for the RCR instruction. */
783IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
784{
785 iemAImpl_rcr_u8,
786 iemAImpl_rcr_u16,
787 iemAImpl_rcr_u32,
788 iemAImpl_rcr_u64
789};
790
791/** Function table for the SHL instruction. */
792IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
793{
794 iemAImpl_shl_u8,
795 iemAImpl_shl_u16,
796 iemAImpl_shl_u32,
797 iemAImpl_shl_u64
798};
799
800/** Function table for the SHR instruction. */
801IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
802{
803 iemAImpl_shr_u8,
804 iemAImpl_shr_u16,
805 iemAImpl_shr_u32,
806 iemAImpl_shr_u64
807};
808
809/** Function table for the SAR instruction. */
810IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
811{
812 iemAImpl_sar_u8,
813 iemAImpl_sar_u16,
814 iemAImpl_sar_u32,
815 iemAImpl_sar_u64
816};
817
818
819/** Function table for the MUL instruction. */
820IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
821{
822 iemAImpl_mul_u8,
823 iemAImpl_mul_u16,
824 iemAImpl_mul_u32,
825 iemAImpl_mul_u64
826};
827
828/** Function table for the IMUL instruction working implicitly on rAX. */
829IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
830{
831 iemAImpl_imul_u8,
832 iemAImpl_imul_u16,
833 iemAImpl_imul_u32,
834 iemAImpl_imul_u64
835};
836
837/** Function table for the DIV instruction. */
838IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
839{
840 iemAImpl_div_u8,
841 iemAImpl_div_u16,
842 iemAImpl_div_u32,
843 iemAImpl_div_u64
844};
845
846/** Function table for the IDIV instruction. */
847IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
848{
849 iemAImpl_idiv_u8,
850 iemAImpl_idiv_u16,
851 iemAImpl_idiv_u32,
852 iemAImpl_idiv_u64
853};
854
855/** Function table for the SHLD instruction */
856IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
857{
858 iemAImpl_shld_u16,
859 iemAImpl_shld_u32,
860 iemAImpl_shld_u64,
861};
862
863/** Function table for the SHRD instruction */
864IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
865{
866 iemAImpl_shrd_u16,
867 iemAImpl_shrd_u32,
868 iemAImpl_shrd_u64,
869};
870
871
872/** Function table for the PUNPCKLBW instruction */
873IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
874/** Function table for the PUNPCKLWD instruction */
875IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
876/** Function table for the PUNPCKLDQ instruction */
877IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
878/** Function table for the PUNPCKLQDQ instruction */
879IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
880
881/** Function table for the PUNPCKHBW instruction */
882IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
883/** Function table for the PUNPCKHWD instruction */
884IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
885/** Function table for the PUNPCKHDQ instruction */
886IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
887/** Function table for the PUNPCKHQDQ instruction */
888IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
889
890/** Function table for the PXOR instruction */
891IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
892/** Function table for the PCMPEQB instruction */
893IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
894/** Function table for the PCMPEQW instruction */
895IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
896/** Function table for the PCMPEQD instruction */
897IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
898
899
900#if defined(IEM_LOG_MEMORY_WRITES)
901/** What IEM just wrote. */
902uint8_t g_abIemWrote[256];
903/** How much IEM just wrote. */
904size_t g_cbIemWrote;
905#endif
906
907
908/*********************************************************************************************************************************
909* Internal Functions *
910*********************************************************************************************************************************/
911IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
912IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
913IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
914IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
915/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
916IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
917IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
918IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
919IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
920IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
921IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
922IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
923IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
924IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
925IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
926IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
927IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
928#ifdef IEM_WITH_SETJMP
929DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
930DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
931DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
932DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
933DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
934#endif
935
936IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
937IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
938IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
939IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
940IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
941IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
942IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
943IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
944IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
945IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
946IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
947IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
948IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
949IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
950IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
951IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
952IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
953
954#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
955IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
956IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
957#endif
958
959
960/**
961 * Sets the pass up status.
962 *
963 * @returns VINF_SUCCESS.
964 * @param pVCpu The cross context virtual CPU structure of the
965 * calling thread.
966 * @param rcPassUp The pass up status. Must be informational.
967 * VINF_SUCCESS is not allowed.
968 */
969IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
970{
971 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
972
973 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
974 if (rcOldPassUp == VINF_SUCCESS)
975 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
976 /* If both are EM scheduling codes, use EM priority rules. */
977 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
978 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
979 {
980 if (rcPassUp < rcOldPassUp)
981 {
982 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
983 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
984 }
985 else
986 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
987 }
988 /* Override EM scheduling with specific status code. */
989 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
990 {
991 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
992 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
993 }
994 /* Don't override specific status code, first come first served. */
995 else
996 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
997 return VINF_SUCCESS;
998}
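/*
 * Worked example (added commentary) of the rules above: informational codes
 * from access handlers are merged so that the final status survives the
 * VINF_SUCCESS returned to the instruction emulation.
 *
 * @code
 *      iemSetPassUpStatus(pVCpu, rcStrict1);   // first informational code is stored
 *      iemSetPassUpStatus(pVCpu, rcStrict2);   // kept only if the rules say so:
 *                                              //  - both EM scheduling codes: the lower value wins,
 *                                              //  - EM code vs. specific code: the specific code wins,
 *                                              //  - otherwise: first come, first served.
 * @endcode
 */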
999
1000
1001/**
1002 * Calculates the CPU mode.
1003 *
1004 * This is mainly for updating IEMCPU::enmCpuMode.
1005 *
1006 * @returns CPU mode.
1007 * @param pVCpu The cross context virtual CPU structure of the
1008 * calling thread.
1009 */
1010DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1011{
1012 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1013 return IEMMODE_64BIT;
1014 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1015 return IEMMODE_32BIT;
1016 return IEMMODE_16BIT;
1017}
1018
1019
1020/**
1021 * Initializes the execution state.
1022 *
1023 * @param pVCpu The cross context virtual CPU structure of the
1024 * calling thread.
1025 * @param fBypassHandlers Whether to bypass access handlers.
1026 *
1027 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1028 * side-effects in strict builds.
1029 */
1030DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1031{
1032 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1033 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1034
1035#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1041 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1042 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1043 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1044#endif
1045
1046#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1047 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1048#endif
1049 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1050 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1051#ifdef VBOX_STRICT
1052 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1053 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1054 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1055 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1056 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1057 pVCpu->iem.s.uRexReg = 127;
1058 pVCpu->iem.s.uRexB = 127;
1059 pVCpu->iem.s.offModRm = 127;
1060 pVCpu->iem.s.uRexIndex = 127;
1061 pVCpu->iem.s.iEffSeg = 127;
1062 pVCpu->iem.s.idxPrefix = 127;
1063 pVCpu->iem.s.uVex3rdReg = 127;
1064 pVCpu->iem.s.uVexLength = 127;
1065 pVCpu->iem.s.fEvexStuff = 127;
1066 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1067# ifdef IEM_WITH_CODE_TLB
1068 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1069 pVCpu->iem.s.pbInstrBuf = NULL;
1070 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1071 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1072 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1073 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1074# else
1075 pVCpu->iem.s.offOpcode = 127;
1076 pVCpu->iem.s.cbOpcode = 127;
1077# endif
1078#endif
1079
1080 pVCpu->iem.s.cActiveMappings = 0;
1081 pVCpu->iem.s.iNextMapping = 0;
1082 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1083 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1084#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1085 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1086 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1087 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1088 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1089 if (!pVCpu->iem.s.fInPatchCode)
1090 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1091#endif
1092}
1093
1094#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1095/**
1096 * Performs a minimal reinitialization of the execution state.
1097 *
1098 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1099 * 'world-switch' types operations on the CPU. Currently only nested
1100 * hardware-virtualization uses it.
1101 *
1102 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1103 */
1104IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1105{
1106 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1107 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1108
1109 pVCpu->iem.s.uCpl = uCpl;
1110 pVCpu->iem.s.enmCpuMode = enmMode;
1111 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1112 pVCpu->iem.s.enmEffAddrMode = enmMode;
1113 if (enmMode != IEMMODE_64BIT)
1114 {
1115 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1116 pVCpu->iem.s.enmEffOpSize = enmMode;
1117 }
1118 else
1119 {
1120 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1121 pVCpu->iem.s.enmEffOpSize = enmMode;
1122 }
1123 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1124#ifndef IEM_WITH_CODE_TLB
1125 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1126 pVCpu->iem.s.offOpcode = 0;
1127 pVCpu->iem.s.cbOpcode = 0;
1128#endif
1129 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1130}
1131#endif
1132
1133/**
1134 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1135 *
1136 * @param pVCpu The cross context virtual CPU structure of the
1137 * calling thread.
1138 */
1139DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1140{
1141 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1142#ifdef VBOX_STRICT
1143# ifdef IEM_WITH_CODE_TLB
1144 NOREF(pVCpu);
1145# else
1146 pVCpu->iem.s.cbOpcode = 0;
1147# endif
1148#else
1149 NOREF(pVCpu);
1150#endif
1151}
1152
1153
1154/**
1155 * Initializes the decoder state.
1156 *
1157 * iemReInitDecoder is mostly a copy of this function.
1158 *
1159 * @param pVCpu The cross context virtual CPU structure of the
1160 * calling thread.
1161 * @param fBypassHandlers Whether to bypass access handlers.
1162 */
1163DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1164{
1165 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1166 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1167
1168#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1169 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1170 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1173 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1174 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1175 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1176 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1177#endif
1178
1179#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1180 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1181#endif
1182 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1183 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1184 pVCpu->iem.s.enmCpuMode = enmMode;
1185 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1186 pVCpu->iem.s.enmEffAddrMode = enmMode;
1187 if (enmMode != IEMMODE_64BIT)
1188 {
1189 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1190 pVCpu->iem.s.enmEffOpSize = enmMode;
1191 }
1192 else
1193 {
1194 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1195 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1196 }
1197 pVCpu->iem.s.fPrefixes = 0;
1198 pVCpu->iem.s.uRexReg = 0;
1199 pVCpu->iem.s.uRexB = 0;
1200 pVCpu->iem.s.uRexIndex = 0;
1201 pVCpu->iem.s.idxPrefix = 0;
1202 pVCpu->iem.s.uVex3rdReg = 0;
1203 pVCpu->iem.s.uVexLength = 0;
1204 pVCpu->iem.s.fEvexStuff = 0;
1205 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1206#ifdef IEM_WITH_CODE_TLB
1207 pVCpu->iem.s.pbInstrBuf = NULL;
1208 pVCpu->iem.s.offInstrNextByte = 0;
1209 pVCpu->iem.s.offCurInstrStart = 0;
1210# ifdef VBOX_STRICT
1211 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1212 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1213 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1214# endif
1215#else
1216 pVCpu->iem.s.offOpcode = 0;
1217 pVCpu->iem.s.cbOpcode = 0;
1218#endif
1219 pVCpu->iem.s.offModRm = 0;
1220 pVCpu->iem.s.cActiveMappings = 0;
1221 pVCpu->iem.s.iNextMapping = 0;
1222 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1223 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1224#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1225 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1226 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1227 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1228 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1229 if (!pVCpu->iem.s.fInPatchCode)
1230 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1231#endif
1232
1233#ifdef DBGFTRACE_ENABLED
1234 switch (enmMode)
1235 {
1236 case IEMMODE_64BIT:
1237 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1238 break;
1239 case IEMMODE_32BIT:
1240 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1241 break;
1242 case IEMMODE_16BIT:
1243 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1244 break;
1245 }
1246#endif
1247}
1248
1249
1250/**
1251 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1252 *
1253 * This is mostly a copy of iemInitDecoder.
1254 *
1255 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1256 */
1257DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1258{
1259 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1260
1261#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1263 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1264 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1265 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1266 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1267 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1268 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1269 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1270#endif
1271
1272 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1273 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1274 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1275 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1276 pVCpu->iem.s.enmEffAddrMode = enmMode;
1277 if (enmMode != IEMMODE_64BIT)
1278 {
1279 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1280 pVCpu->iem.s.enmEffOpSize = enmMode;
1281 }
1282 else
1283 {
1284 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1285 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1286 }
1287 pVCpu->iem.s.fPrefixes = 0;
1288 pVCpu->iem.s.uRexReg = 0;
1289 pVCpu->iem.s.uRexB = 0;
1290 pVCpu->iem.s.uRexIndex = 0;
1291 pVCpu->iem.s.idxPrefix = 0;
1292 pVCpu->iem.s.uVex3rdReg = 0;
1293 pVCpu->iem.s.uVexLength = 0;
1294 pVCpu->iem.s.fEvexStuff = 0;
1295 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1296#ifdef IEM_WITH_CODE_TLB
1297 if (pVCpu->iem.s.pbInstrBuf)
1298 {
1299 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1300 - pVCpu->iem.s.uInstrBufPc;
1301 if (off < pVCpu->iem.s.cbInstrBufTotal)
1302 {
1303 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1304 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1305 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1306 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1307 else
1308 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1309 }
1310 else
1311 {
1312 pVCpu->iem.s.pbInstrBuf = NULL;
1313 pVCpu->iem.s.offInstrNextByte = 0;
1314 pVCpu->iem.s.offCurInstrStart = 0;
1315 pVCpu->iem.s.cbInstrBuf = 0;
1316 pVCpu->iem.s.cbInstrBufTotal = 0;
1317 }
1318 }
1319 else
1320 {
1321 pVCpu->iem.s.offInstrNextByte = 0;
1322 pVCpu->iem.s.offCurInstrStart = 0;
1323 pVCpu->iem.s.cbInstrBuf = 0;
1324 pVCpu->iem.s.cbInstrBufTotal = 0;
1325 }
1326#else
1327 pVCpu->iem.s.cbOpcode = 0;
1328 pVCpu->iem.s.offOpcode = 0;
1329#endif
1330 pVCpu->iem.s.offModRm = 0;
1331 Assert(pVCpu->iem.s.cActiveMappings == 0);
1332 pVCpu->iem.s.iNextMapping = 0;
1333 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1334 Assert(pVCpu->iem.s.fBypassHandlers == false);
1335#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1336 if (!pVCpu->iem.s.fInPatchCode)
1337 { /* likely */ }
1338 else
1339 {
1340 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1341 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1342 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1343 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1344 if (!pVCpu->iem.s.fInPatchCode)
1345 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1346 }
1347#endif
1348
1349#ifdef DBGFTRACE_ENABLED
1350 switch (enmMode)
1351 {
1352 case IEMMODE_64BIT:
1353 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1354 break;
1355 case IEMMODE_32BIT:
1356 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1357 break;
1358 case IEMMODE_16BIT:
1359 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1360 break;
1361 }
1362#endif
1363}
1364
1365
1366
1367/**
1368 * Prefetches opcodes the first time, when starting execution.
1369 *
1370 * @returns Strict VBox status code.
1371 * @param pVCpu The cross context virtual CPU structure of the
1372 * calling thread.
1373 * @param fBypassHandlers Whether to bypass access handlers.
1374 */
1375IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1376{
1377 iemInitDecoder(pVCpu, fBypassHandlers);
1378
1379#ifdef IEM_WITH_CODE_TLB
1380 /** @todo Do ITLB lookup here. */
1381
1382#else /* !IEM_WITH_CODE_TLB */
1383
1384 /*
1385 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1386 *
1387 * First translate CS:rIP to a physical address.
1388 */
1389 uint32_t cbToTryRead;
1390 RTGCPTR GCPtrPC;
1391 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1392 {
1393 cbToTryRead = PAGE_SIZE;
1394 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1395 if (IEM_IS_CANONICAL(GCPtrPC))
1396 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1397 else
1398 return iemRaiseGeneralProtectionFault0(pVCpu);
1399 }
1400 else
1401 {
1402 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1403 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1404 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1405 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1406 else
1407 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1408 if (cbToTryRead) { /* likely */ }
1409 else /* overflowed */
1410 {
1411 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1412 cbToTryRead = UINT32_MAX;
1413 }
1414 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1415 Assert(GCPtrPC <= UINT32_MAX);
1416 }
1417
1418# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1419 /* Allow interpretation of patch manager code blocks since they can for
1420 instance throw #PFs for perfectly good reasons. */
1421 if (pVCpu->iem.s.fInPatchCode)
1422 {
1423 size_t cbRead = 0;
1424 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1425 AssertRCReturn(rc, rc);
1426 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1427 return VINF_SUCCESS;
1428 }
1429# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1430
1431 RTGCPHYS GCPhys;
1432 uint64_t fFlags;
1433 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1434 if (RT_SUCCESS(rc)) { /* probable */ }
1435 else
1436 {
1437 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1438 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1439 }
1440 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1441 else
1442 {
1443 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1444 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1445 }
1446 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1447 else
1448 {
1449 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1450 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1451 }
1452 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1453 /** @todo Check reserved bits and such stuff. PGM is better at doing
1454 * that, so do it when implementing the guest virtual address
1455 * TLB... */
1456
1457 /*
1458 * Read the bytes at this address.
1459 */
1460 PVM pVM = pVCpu->CTX_SUFF(pVM);
1461# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1462 size_t cbActual;
1463 if ( PATMIsEnabled(pVM)
1464 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1465 {
1466 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1467 Assert(cbActual > 0);
1468 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1469 }
1470 else
1471# endif
1472 {
1473 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1474 if (cbToTryRead > cbLeftOnPage)
1475 cbToTryRead = cbLeftOnPage;
1476 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1477 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1478
1479 if (!pVCpu->iem.s.fBypassHandlers)
1480 {
1481 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1482 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1483 { /* likely */ }
1484 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1485 {
1486 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
 1487 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1488 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1489 }
1490 else
1491 {
1492 Log((RT_SUCCESS(rcStrict)
1493 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1494 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
 1495 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1496 return rcStrict;
1497 }
1498 }
1499 else
1500 {
1501 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1502 if (RT_SUCCESS(rc))
1503 { /* likely */ }
1504 else
1505 {
1506 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
 1507 GCPtrPC, GCPhys, cbToTryRead, rc));
1508 return rc;
1509 }
1510 }
1511 pVCpu->iem.s.cbOpcode = cbToTryRead;
1512 }
1513#endif /* !IEM_WITH_CODE_TLB */
1514 return VINF_SUCCESS;
1515}
1516
1517
1518/**
1519 * Invalidates the IEM TLBs.
1520 *
1521 * This is called internally as well as by PGM when moving GC mappings.
1522 *
1524 * @param pVCpu The cross context virtual CPU structure of the calling
1525 * thread.
1526 * @param fVmm Set when PGM calls us with a remapping.
1527 */
1528VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1529{
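 /* Invalidation is done lazily by bumping the TLB revision: the entry tags
 embed the revision, so a bump stales every entry in O(1). Only when the
 revision counter wraps around to zero do the tags need scrubbing. */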
1530#ifdef IEM_WITH_CODE_TLB
1531 pVCpu->iem.s.cbInstrBufTotal = 0;
1532 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1533 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1534 { /* very likely */ }
1535 else
1536 {
1537 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1538 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1539 while (i-- > 0)
1540 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1541 }
1542#endif
1543
1544#ifdef IEM_WITH_DATA_TLB
1545 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1546 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1547 { /* very likely */ }
1548 else
1549 {
1550 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1551 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1552 while (i-- > 0)
1553 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1554 }
1555#endif
1556 NOREF(pVCpu); NOREF(fVmm);
1557}
1558
1559
1560/**
1561 * Invalidates a page in the TLBs.
1562 *
1563 * @param pVCpu The cross context virtual CPU structure of the calling
1564 * thread.
1565 * @param GCPtr The address of the page to invalidate
1566 */
1567VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1568{
1569#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1570 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1571 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1572 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1573 uintptr_t idx = (uint8_t)GCPtr;
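 /* The TLBs are direct mapped with 256 entries each: the low 8 bits of the
 page number select the entry, and the tag is the page number OR'ed with the
 current revision. */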
1574
1575# ifdef IEM_WITH_CODE_TLB
1576 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1577 {
1578 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1579 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1580 pVCpu->iem.s.cbInstrBufTotal = 0;
1581 }
1582# endif
1583
1584# ifdef IEM_WITH_DATA_TLB
1585 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1586 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1587# endif
1588#else
1589 NOREF(pVCpu); NOREF(GCPtr);
1590#endif
1591}
1592
1593
1594/**
1595 * Invalidates the host physical aspects of the IEM TLBs.
1596 *
1597 * This is called internally as well as by PGM when moving GC mappings.
1598 *
1599 * @param pVCpu The cross context virtual CPU structure of the calling
1600 * thread.
1601 */
1602VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1603{
1604#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
 1605 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1606
1607# ifdef IEM_WITH_CODE_TLB
1608 pVCpu->iem.s.cbInstrBufTotal = 0;
1609# endif
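 /* Same lazy scheme as for the virtual revisions: bumping the physical
 revision stales every cached host mapping; only on wrap-around do we walk the
 entries and drop the ring-3 pointers and physical access flags. */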
1610 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1611 if (uTlbPhysRev != 0)
1612 {
1613 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1614 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1615 }
1616 else
1617 {
1618 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1619 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1620
1621 unsigned i;
1622# ifdef IEM_WITH_CODE_TLB
1623 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1624 while (i-- > 0)
1625 {
1626 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1627 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1628 }
1629# endif
1630# ifdef IEM_WITH_DATA_TLB
1631 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1632 while (i-- > 0)
1633 {
1634 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1635 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1636 }
1637# endif
1638 }
1639#else
1640 NOREF(pVCpu);
1641#endif
1642}
1643
1644
1645/**
 1646 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1647 *
1648 * This is called internally as well as by PGM when moving GC mappings.
1649 *
1650 * @param pVM The cross context VM structure.
1651 *
1652 * @remarks Caller holds the PGM lock.
1653 */
1654VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1655{
1656 RT_NOREF_PV(pVM);
1657}
1658
1659#ifdef IEM_WITH_CODE_TLB
1660
1661/**
 1662 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
 1663 * failure (longjmp).
1664 *
1665 * We end up here for a number of reasons:
1666 * - pbInstrBuf isn't yet initialized.
 1667 * - Advancing beyond the buffer boundary (e.g. cross page).
1668 * - Advancing beyond the CS segment limit.
1669 * - Fetching from non-mappable page (e.g. MMIO).
1670 *
1671 * @param pVCpu The cross context virtual CPU structure of the
1672 * calling thread.
 1673 * @param cbDst Number of bytes to read.
 1674 * @param pvDst Where to return the bytes.
1675 *
1676 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1677 */
1678IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1679{
1680#ifdef IN_RING3
1681 for (;;)
1682 {
1683 Assert(cbDst <= 8);
1684 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1685
1686 /*
1687 * We might have a partial buffer match, deal with that first to make the
1688 * rest simpler. This is the first part of the cross page/buffer case.
1689 */
1690 if (pVCpu->iem.s.pbInstrBuf != NULL)
1691 {
1692 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1693 {
1694 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1695 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1696 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1697
1698 cbDst -= cbCopy;
1699 pvDst = (uint8_t *)pvDst + cbCopy;
1700 offBuf += cbCopy;
 1701 pVCpu->iem.s.offInstrNextByte += cbCopy;
1702 }
1703 }
1704
1705 /*
1706 * Check segment limit, figuring how much we're allowed to access at this point.
1707 *
1708 * We will fault immediately if RIP is past the segment limit / in non-canonical
1709 * territory. If we do continue, there are one or more bytes to read before we
1710 * end up in trouble and we need to do that first before faulting.
1711 */
1712 RTGCPTR GCPtrFirst;
1713 uint32_t cbMaxRead;
1714 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1715 {
1716 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1717 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1718 { /* likely */ }
1719 else
1720 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1721 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1722 }
1723 else
1724 {
1725 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1726 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1727 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1728 { /* likely */ }
1729 else
1730 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1731 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1732 if (cbMaxRead != 0)
1733 { /* likely */ }
1734 else
1735 {
1736 /* Overflowed because address is 0 and limit is max. */
1737 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1738 cbMaxRead = X86_PAGE_SIZE;
1739 }
1740 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1741 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1742 if (cbMaxRead2 < cbMaxRead)
1743 cbMaxRead = cbMaxRead2;
1744 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1745 }
1746
1747 /*
1748 * Get the TLB entry for this piece of code.
1749 */
1750 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1751 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1752 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1753 if (pTlbe->uTag == uTag)
1754 {
1755 /* likely when executing lots of code, otherwise unlikely */
1756# ifdef VBOX_WITH_STATISTICS
1757 pVCpu->iem.s.CodeTlb.cTlbHits++;
1758# endif
1759 }
1760 else
1761 {
1762 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1763# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1764 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1765 {
1766 pTlbe->uTag = uTag;
1767 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
 1768 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1769 pTlbe->GCPhys = NIL_RTGCPHYS;
1770 pTlbe->pbMappingR3 = NULL;
1771 }
1772 else
1773# endif
1774 {
1775 RTGCPHYS GCPhys;
1776 uint64_t fFlags;
1777 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1778 if (RT_FAILURE(rc))
1779 {
1780 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1781 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1782 }
1783
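 /* The page-table bits are stored inverted (NO_USER/NO_WRITE/NO_DIRTY) so that
 zero means the access is permitted, while the NX bit is shifted down so it
 lands on IEMTLBE_F_PT_NO_EXEC (bit 0, see the AssertCompile below). */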
1784 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1785 pTlbe->uTag = uTag;
1786 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1787 pTlbe->GCPhys = GCPhys;
1788 pTlbe->pbMappingR3 = NULL;
1789 }
1790 }
1791
1792 /*
1793 * Check TLB page table level access flags.
1794 */
1795 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1796 {
1797 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1798 {
1799 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1800 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1801 }
1802 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1803 {
1804 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1805 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1806 }
1807 }
1808
1809# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1810 /*
1811 * Allow interpretation of patch manager code blocks since they can for
1812 * instance throw #PFs for perfectly good reasons.
1813 */
1814 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
 1815 { /* likely */ }
1816 else
1817 {
 1818 /** @todo Could optimize this a little in ring-3 if we liked. */
1819 size_t cbRead = 0;
1820 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1821 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1822 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1823 return;
1824 }
1825# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1826
1827 /*
1828 * Look up the physical page info if necessary.
1829 */
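 /* Part of fFlagsAndPhysRev doubles as the physical revision (IEMTLBE_F_PHYS_REV);
 when it no longer matches uTlbPhysRev, the cached ring-3 mapping and physical
 access flags are stale and are re-queried from PGM below. */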
1830 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1831 { /* not necessary */ }
1832 else
1833 {
1834 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1835 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1836 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1837 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1838 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1839 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1840 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1841 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1842 }
1843
1844# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1845 /*
 1846 * Try to do a direct read using the pbMappingR3 pointer.
1847 */
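 /* Note that the buffer bookkeeping below caps cbInstrBuf at 15 bytes past the
 start of the current instruction, the architectural maximum x86 instruction
 length. */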
1848 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1849 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1850 {
1851 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1852 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1853 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1854 {
1855 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1856 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1857 }
1858 else
1859 {
1860 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1861 Assert(cbInstr < cbMaxRead);
1862 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1863 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1864 }
1865 if (cbDst <= cbMaxRead)
1866 {
1867 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1868 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1869 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1870 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1871 return;
1872 }
1873 pVCpu->iem.s.pbInstrBuf = NULL;
1874
1875 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1876 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1877 }
1878 else
1879# endif
1880#if 0
1881 /*
 1882 * If there is no special read handling, we can read a bit more and
1883 * put it in the prefetch buffer.
1884 */
1885 if ( cbDst < cbMaxRead
1886 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1887 {
1888 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1889 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1890 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1891 { /* likely */ }
1892 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1893 {
1894 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1895 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1896 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 1897 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1898 }
1899 else
1900 {
1901 Log((RT_SUCCESS(rcStrict)
1902 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1903 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1904 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1905 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1906 }
1907 }
1908 /*
1909 * Special read handling, so only read exactly what's needed.
1910 * This is a highly unlikely scenario.
1911 */
1912 else
1913#endif
1914 {
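 /* Slow path: read through PGM so access handlers and MMIO are honoured, and
 fetch only what was asked for (capped at the page/segment boundary). */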
1915 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1916 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1917 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1918 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1919 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1920 { /* likely */ }
1921 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1922 {
1923 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
 1924 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1925 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1926 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1927 }
1928 else
1929 {
1930 Log((RT_SUCCESS(rcStrict)
1931 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1932 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
 1933 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1934 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1935 }
1936 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1937 if (cbToRead == cbDst)
1938 return;
1939 }
1940
1941 /*
1942 * More to read, loop.
1943 */
1944 cbDst -= cbMaxRead;
1945 pvDst = (uint8_t *)pvDst + cbMaxRead;
1946 }
1947#else
1948 RT_NOREF(pvDst, cbDst);
1949 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1950#endif
1951}
1952
1953#else
1954
1955/**
 1956 * Tries to fetch at least @a cbMin more opcode bytes, raising the
 1957 * appropriate exception if it fails.
1958 *
1959 * @returns Strict VBox status code.
1960 * @param pVCpu The cross context virtual CPU structure of the
1961 * calling thread.
 1962 * @param cbMin The minimum number of bytes relative to offOpcode
 1963 * that must be read.
1964 */
1965IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1966{
1967 /*
1968 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1969 *
1970 * First translate CS:rIP to a physical address.
1971 */
1972 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1973 uint32_t cbToTryRead;
1974 RTGCPTR GCPtrNext;
1975 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1976 {
1977 cbToTryRead = PAGE_SIZE;
1978 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1979 if (!IEM_IS_CANONICAL(GCPtrNext))
1980 return iemRaiseGeneralProtectionFault0(pVCpu);
1981 }
1982 else
1983 {
1984 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1985 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1986 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1987 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1988 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1989 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1990 if (!cbToTryRead) /* overflowed */
1991 {
1992 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1993 cbToTryRead = UINT32_MAX;
1994 /** @todo check out wrapping around the code segment. */
1995 }
1996 if (cbToTryRead < cbMin - cbLeft)
1997 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1998 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1999 }
2000
2001 /* Only read up to the end of the page, and make sure we don't read more
2002 than the opcode buffer can hold. */
2003 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2004 if (cbToTryRead > cbLeftOnPage)
2005 cbToTryRead = cbLeftOnPage;
2006 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2007 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2008/** @todo r=bird: Convert assertion into undefined opcode exception? */
2009 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2010
2011# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2012 /* Allow interpretation of patch manager code blocks since they can for
2013 instance throw #PFs for perfectly good reasons. */
2014 if (pVCpu->iem.s.fInPatchCode)
2015 {
2016 size_t cbRead = 0;
2017 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2018 AssertRCReturn(rc, rc);
2019 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2020 return VINF_SUCCESS;
2021 }
2022# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2023
2024 RTGCPHYS GCPhys;
2025 uint64_t fFlags;
2026 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2027 if (RT_FAILURE(rc))
2028 {
2029 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2030 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2031 }
2032 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2033 {
2034 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2035 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2036 }
2037 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2038 {
2039 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2040 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2041 }
2042 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2043 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2044 /** @todo Check reserved bits and such stuff. PGM is better at doing
2045 * that, so do it when implementing the guest virtual address
2046 * TLB... */
2047
2048 /*
2049 * Read the bytes at this address.
2050 *
2051 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2052 * and since PATM should only patch the start of an instruction there
2053 * should be no need to check again here.
2054 */
2055 if (!pVCpu->iem.s.fBypassHandlers)
2056 {
2057 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2058 cbToTryRead, PGMACCESSORIGIN_IEM);
2059 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2060 { /* likely */ }
2061 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2062 {
2063 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
 2064 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2065 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2066 }
2067 else
2068 {
2069 Log((RT_SUCCESS(rcStrict)
2070 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2071 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
 2072 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2073 return rcStrict;
2074 }
2075 }
2076 else
2077 {
2078 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2079 if (RT_SUCCESS(rc))
2080 { /* likely */ }
2081 else
2082 {
2083 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2084 return rc;
2085 }
2086 }
2087 pVCpu->iem.s.cbOpcode += cbToTryRead;
2088 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2089
2090 return VINF_SUCCESS;
2091}
2092
2093#endif /* !IEM_WITH_CODE_TLB */
2094#ifndef IEM_WITH_SETJMP
2095
2096/**
2097 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2098 *
2099 * @returns Strict VBox status code.
2100 * @param pVCpu The cross context virtual CPU structure of the
2101 * calling thread.
2102 * @param pb Where to return the opcode byte.
2103 */
2104DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2105{
2106 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2107 if (rcStrict == VINF_SUCCESS)
2108 {
2109 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2110 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2111 pVCpu->iem.s.offOpcode = offOpcode + 1;
2112 }
2113 else
2114 *pb = 0;
2115 return rcStrict;
2116}
2117
2118
2119/**
2120 * Fetches the next opcode byte.
2121 *
2122 * @returns Strict VBox status code.
2123 * @param pVCpu The cross context virtual CPU structure of the
2124 * calling thread.
2125 * @param pu8 Where to return the opcode byte.
2126 */
2127DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2128{
2129 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2130 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2131 {
2132 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2133 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2134 return VINF_SUCCESS;
2135 }
2136 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2137}
2138
2139#else /* IEM_WITH_SETJMP */
2140
2141/**
2142 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2143 *
2144 * @returns The opcode byte.
2145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2146 */
2147DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2148{
2149# ifdef IEM_WITH_CODE_TLB
2150 uint8_t u8;
2151 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2152 return u8;
2153# else
2154 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2155 if (rcStrict == VINF_SUCCESS)
2156 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2157 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2158# endif
2159}
2160
2161
2162/**
2163 * Fetches the next opcode byte, longjmp on error.
2164 *
2165 * @returns The opcode byte.
2166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2167 */
2168DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2169{
2170# ifdef IEM_WITH_CODE_TLB
2171 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2172 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2173 if (RT_LIKELY( pbBuf != NULL
2174 && offBuf < pVCpu->iem.s.cbInstrBuf))
2175 {
2176 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2177 return pbBuf[offBuf];
2178 }
2179# else
2180 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2181 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2182 {
2183 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2184 return pVCpu->iem.s.abOpcode[offOpcode];
2185 }
2186# endif
2187 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2188}
2189
2190#endif /* IEM_WITH_SETJMP */
2191
2192/**
2193 * Fetches the next opcode byte, returns automatically on failure.
2194 *
2195 * @param a_pu8 Where to return the opcode byte.
2196 * @remark Implicitly references pVCpu.
2197 */
2198#ifndef IEM_WITH_SETJMP
2199# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2200 do \
2201 { \
2202 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2203 if (rcStrict2 == VINF_SUCCESS) \
2204 { /* likely */ } \
2205 else \
2206 return rcStrict2; \
2207 } while (0)
2208#else
2209# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2210#endif /* IEM_WITH_SETJMP */
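/* Usage sketch, for illustration only (u8Imm is a made-up local, not lifted
   from the decoder tables): an immediate byte is typically fetched as
       uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
   In the non-setjmp build the enclosing function must return a strict status
   code, since the macro returns on failure; in the setjmp build it longjmps. */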
2211
2212
2213#ifndef IEM_WITH_SETJMP
2214/**
2215 * Fetches the next signed byte from the opcode stream.
2216 *
2217 * @returns Strict VBox status code.
2218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2219 * @param pi8 Where to return the signed byte.
2220 */
2221DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2222{
2223 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2224}
2225#endif /* !IEM_WITH_SETJMP */
2226
2227
2228/**
2229 * Fetches the next signed byte from the opcode stream, returning automatically
2230 * on failure.
2231 *
2232 * @param a_pi8 Where to return the signed byte.
2233 * @remark Implicitly references pVCpu.
2234 */
2235#ifndef IEM_WITH_SETJMP
2236# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2237 do \
2238 { \
2239 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2240 if (rcStrict2 != VINF_SUCCESS) \
2241 return rcStrict2; \
2242 } while (0)
2243#else /* IEM_WITH_SETJMP */
2244# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2245
2246#endif /* IEM_WITH_SETJMP */
2247
2248#ifndef IEM_WITH_SETJMP
2249
2250/**
2251 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2252 *
2253 * @returns Strict VBox status code.
2254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 2255 * @param pu16 Where to return the word (sign-extended opcode byte).
2256 */
2257DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2258{
2259 uint8_t u8;
2260 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2261 if (rcStrict == VINF_SUCCESS)
2262 *pu16 = (int8_t)u8;
2263 return rcStrict;
2264}
2265
2266
2267/**
2268 * Fetches the next signed byte from the opcode stream, extending it to
2269 * unsigned 16-bit.
2270 *
2271 * @returns Strict VBox status code.
2272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2273 * @param pu16 Where to return the unsigned word.
2274 */
2275DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2276{
2277 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2278 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2279 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2280
2281 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2282 pVCpu->iem.s.offOpcode = offOpcode + 1;
2283 return VINF_SUCCESS;
2284}
2285
2286#endif /* !IEM_WITH_SETJMP */
2287
2288/**
 2289 * Fetches the next signed byte from the opcode stream, sign-extending it to
 2290 * a word and returning automatically on failure.
2291 *
2292 * @param a_pu16 Where to return the word.
2293 * @remark Implicitly references pVCpu.
2294 */
2295#ifndef IEM_WITH_SETJMP
2296# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2297 do \
2298 { \
2299 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2300 if (rcStrict2 != VINF_SUCCESS) \
2301 return rcStrict2; \
2302 } while (0)
2303#else
2304# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2305#endif
2306
2307#ifndef IEM_WITH_SETJMP
2308
2309/**
2310 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2311 *
2312 * @returns Strict VBox status code.
2313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2314 * @param pu32 Where to return the opcode dword.
2315 */
2316DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2317{
2318 uint8_t u8;
2319 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2320 if (rcStrict == VINF_SUCCESS)
2321 *pu32 = (int8_t)u8;
2322 return rcStrict;
2323}
2324
2325
2326/**
2327 * Fetches the next signed byte from the opcode stream, extending it to
2328 * unsigned 32-bit.
2329 *
2330 * @returns Strict VBox status code.
2331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2332 * @param pu32 Where to return the unsigned dword.
2333 */
2334DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2335{
2336 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2337 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2338 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2339
2340 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2341 pVCpu->iem.s.offOpcode = offOpcode + 1;
2342 return VINF_SUCCESS;
2343}
2344
2345#endif /* !IEM_WITH_SETJMP */
2346
2347/**
 2348 * Fetches the next signed byte from the opcode stream, sign-extending it to
 2349 * a double word and returning automatically on failure.
 2350 *
 2351 * @param a_pu32 Where to return the double word.
2352 * @remark Implicitly references pVCpu.
2353 */
2354#ifndef IEM_WITH_SETJMP
2355#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2356 do \
2357 { \
2358 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2359 if (rcStrict2 != VINF_SUCCESS) \
2360 return rcStrict2; \
2361 } while (0)
2362#else
2363# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2364#endif
2365
2366#ifndef IEM_WITH_SETJMP
2367
2368/**
2369 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2370 *
2371 * @returns Strict VBox status code.
2372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2373 * @param pu64 Where to return the opcode qword.
2374 */
2375DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2376{
2377 uint8_t u8;
2378 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2379 if (rcStrict == VINF_SUCCESS)
2380 *pu64 = (int8_t)u8;
2381 return rcStrict;
2382}
2383
2384
2385/**
2386 * Fetches the next signed byte from the opcode stream, extending it to
2387 * unsigned 64-bit.
2388 *
2389 * @returns Strict VBox status code.
2390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2391 * @param pu64 Where to return the unsigned qword.
2392 */
2393DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2394{
2395 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2396 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2397 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2398
2399 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2400 pVCpu->iem.s.offOpcode = offOpcode + 1;
2401 return VINF_SUCCESS;
2402}
2403
2404#endif /* !IEM_WITH_SETJMP */
2405
2406
2407/**
 2408 * Fetches the next signed byte from the opcode stream, sign-extending it to
 2409 * a quad word and returning automatically on failure.
 2410 *
 2411 * @param a_pu64 Where to return the quad word.
2412 * @remark Implicitly references pVCpu.
2413 */
2414#ifndef IEM_WITH_SETJMP
2415# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2416 do \
2417 { \
2418 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2419 if (rcStrict2 != VINF_SUCCESS) \
2420 return rcStrict2; \
2421 } while (0)
2422#else
2423# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2424#endif
2425
2426
2427#ifndef IEM_WITH_SETJMP
2428/**
 2429 * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset.
2430 *
2431 * @returns Strict VBox status code.
2432 * @param pVCpu The cross context virtual CPU structure of the
2433 * calling thread.
2434 * @param pu8 Where to return the opcode byte.
2435 */
2436DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2437{
2438 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2439 pVCpu->iem.s.offModRm = offOpcode;
2440 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2441 {
2442 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2443 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2444 return VINF_SUCCESS;
2445 }
2446 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2447}
2448#else /* IEM_WITH_SETJMP */
2449/**
 2450 * Fetches the next opcode byte, which is a ModR/M byte, longjmp on error.
2451 *
2452 * @returns The opcode byte.
2453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2454 */
2455DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2456{
2457# ifdef IEM_WITH_CODE_TLB
2458 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2459 pVCpu->iem.s.offModRm = offBuf;
2460 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2461 if (RT_LIKELY( pbBuf != NULL
2462 && offBuf < pVCpu->iem.s.cbInstrBuf))
2463 {
2464 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2465 return pbBuf[offBuf];
2466 }
2467# else
2468 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2469 pVCpu->iem.s.offModRm = offOpcode;
2470 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2471 {
2472 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2473 return pVCpu->iem.s.abOpcode[offOpcode];
2474 }
2475# endif
2476 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2477}
2478#endif /* IEM_WITH_SETJMP */
2479
2480/**
2481 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2482 * on failure.
2483 *
2484 * Will note down the position of the ModR/M byte for VT-x exits.
2485 *
2486 * @param a_pbRm Where to return the RM opcode byte.
2487 * @remark Implicitly references pVCpu.
2488 */
2489#ifndef IEM_WITH_SETJMP
2490# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2491 do \
2492 { \
2493 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2494 if (rcStrict2 == VINF_SUCCESS) \
2495 { /* likely */ } \
2496 else \
2497 return rcStrict2; \
2498 } while (0)
2499#else
2500# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2501#endif /* IEM_WITH_SETJMP */
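/* Usage sketch, for illustration only (bRm is a made-up local): ModR/M encoded
   instructions typically begin decoding with
       uint8_t bRm; IEM_OPCODE_GET_NEXT_RM(&bRm);
   which also records the ModR/M offset (offModRm) for VT-x instruction info. */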
2502
2503
2504#ifndef IEM_WITH_SETJMP
2505
2506/**
2507 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2508 *
2509 * @returns Strict VBox status code.
2510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2511 * @param pu16 Where to return the opcode word.
2512 */
2513DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2514{
2515 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2516 if (rcStrict == VINF_SUCCESS)
2517 {
2518 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2519# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2520 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2521# else
2522 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2523# endif
2524 pVCpu->iem.s.offOpcode = offOpcode + 2;
2525 }
2526 else
2527 *pu16 = 0;
2528 return rcStrict;
2529}
2530
2531
2532/**
2533 * Fetches the next opcode word.
2534 *
2535 * @returns Strict VBox status code.
2536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2537 * @param pu16 Where to return the opcode word.
2538 */
2539DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2540{
2541 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2542 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2543 {
2544 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2545# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2546 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2547# else
2548 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2549# endif
2550 return VINF_SUCCESS;
2551 }
2552 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2553}
2554
2555#else /* IEM_WITH_SETJMP */
2556
2557/**
 2558 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2559 *
2560 * @returns The opcode word.
2561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2562 */
2563DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2564{
2565# ifdef IEM_WITH_CODE_TLB
2566 uint16_t u16;
2567 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2568 return u16;
2569# else
2570 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2571 if (rcStrict == VINF_SUCCESS)
2572 {
2573 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2574 pVCpu->iem.s.offOpcode += 2;
2575# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2576 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2577# else
2578 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2579# endif
2580 }
2581 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2582# endif
2583}
2584
2585
2586/**
2587 * Fetches the next opcode word, longjmp on error.
2588 *
2589 * @returns The opcode word.
2590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2591 */
2592DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2593{
2594# ifdef IEM_WITH_CODE_TLB
2595 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2596 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2597 if (RT_LIKELY( pbBuf != NULL
2598 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2599 {
2600 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2601# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2602 return *(uint16_t const *)&pbBuf[offBuf];
2603# else
2604 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2605# endif
2606 }
2607# else
2608 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2609 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2610 {
2611 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2612# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2613 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2614# else
2615 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2616# endif
2617 }
2618# endif
2619 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2620}
2621
2622#endif /* IEM_WITH_SETJMP */
2623
2624
2625/**
2626 * Fetches the next opcode word, returns automatically on failure.
2627 *
2628 * @param a_pu16 Where to return the opcode word.
2629 * @remark Implicitly references pVCpu.
2630 */
2631#ifndef IEM_WITH_SETJMP
2632# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2633 do \
2634 { \
2635 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2636 if (rcStrict2 != VINF_SUCCESS) \
2637 return rcStrict2; \
2638 } while (0)
2639#else
2640# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2641#endif
2642
2643#ifndef IEM_WITH_SETJMP
2644
2645/**
2646 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2647 *
2648 * @returns Strict VBox status code.
2649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2650 * @param pu32 Where to return the opcode double word.
2651 */
2652DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2653{
2654 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2655 if (rcStrict == VINF_SUCCESS)
2656 {
2657 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2658 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2659 pVCpu->iem.s.offOpcode = offOpcode + 2;
2660 }
2661 else
2662 *pu32 = 0;
2663 return rcStrict;
2664}
2665
2666
2667/**
2668 * Fetches the next opcode word, zero extending it to a double word.
2669 *
2670 * @returns Strict VBox status code.
2671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2672 * @param pu32 Where to return the opcode double word.
2673 */
2674DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2675{
2676 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2677 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2678 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2679
2680 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2681 pVCpu->iem.s.offOpcode = offOpcode + 2;
2682 return VINF_SUCCESS;
2683}
2684
2685#endif /* !IEM_WITH_SETJMP */
2686
2687
2688/**
2689 * Fetches the next opcode word and zero extends it to a double word, returns
2690 * automatically on failure.
2691 *
2692 * @param a_pu32 Where to return the opcode double word.
2693 * @remark Implicitly references pVCpu.
2694 */
2695#ifndef IEM_WITH_SETJMP
2696# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2697 do \
2698 { \
2699 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2700 if (rcStrict2 != VINF_SUCCESS) \
2701 return rcStrict2; \
2702 } while (0)
2703#else
2704# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2705#endif
2706
2707#ifndef IEM_WITH_SETJMP
2708
2709/**
2710 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2711 *
2712 * @returns Strict VBox status code.
2713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2714 * @param pu64 Where to return the opcode quad word.
2715 */
2716DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2717{
2718 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2719 if (rcStrict == VINF_SUCCESS)
2720 {
2721 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2722 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2723 pVCpu->iem.s.offOpcode = offOpcode + 2;
2724 }
2725 else
2726 *pu64 = 0;
2727 return rcStrict;
2728}
2729
2730
2731/**
2732 * Fetches the next opcode word, zero extending it to a quad word.
2733 *
2734 * @returns Strict VBox status code.
2735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2736 * @param pu64 Where to return the opcode quad word.
2737 */
2738DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2739{
2740 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2741 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2742 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2743
2744 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2745 pVCpu->iem.s.offOpcode = offOpcode + 2;
2746 return VINF_SUCCESS;
2747}
2748
2749#endif /* !IEM_WITH_SETJMP */
2750
2751/**
2752 * Fetches the next opcode word and zero extends it to a quad word, returns
2753 * automatically on failure.
2754 *
2755 * @param a_pu64 Where to return the opcode quad word.
2756 * @remark Implicitly references pVCpu.
2757 */
2758#ifndef IEM_WITH_SETJMP
2759# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2760 do \
2761 { \
2762 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2763 if (rcStrict2 != VINF_SUCCESS) \
2764 return rcStrict2; \
2765 } while (0)
2766#else
2767# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2768#endif
2769
2770
2771#ifndef IEM_WITH_SETJMP
2772/**
2773 * Fetches the next signed word from the opcode stream.
2774 *
2775 * @returns Strict VBox status code.
2776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2777 * @param pi16 Where to return the signed word.
2778 */
2779DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2780{
2781 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2782}
2783#endif /* !IEM_WITH_SETJMP */
2784
2785
2786/**
2787 * Fetches the next signed word from the opcode stream, returning automatically
2788 * on failure.
2789 *
2790 * @param a_pi16 Where to return the signed word.
2791 * @remark Implicitly references pVCpu.
2792 */
2793#ifndef IEM_WITH_SETJMP
2794# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2795 do \
2796 { \
2797 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2798 if (rcStrict2 != VINF_SUCCESS) \
2799 return rcStrict2; \
2800 } while (0)
2801#else
2802# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2803#endif
2804
2805#ifndef IEM_WITH_SETJMP
2806
2807/**
2808 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2809 *
2810 * @returns Strict VBox status code.
2811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2812 * @param pu32 Where to return the opcode dword.
2813 */
2814DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2815{
2816 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2817 if (rcStrict == VINF_SUCCESS)
2818 {
2819 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2820# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2821 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2822# else
2823 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2824 pVCpu->iem.s.abOpcode[offOpcode + 1],
2825 pVCpu->iem.s.abOpcode[offOpcode + 2],
2826 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2827# endif
2828 pVCpu->iem.s.offOpcode = offOpcode + 4;
2829 }
2830 else
2831 *pu32 = 0;
2832 return rcStrict;
2833}
2834
2835
2836/**
2837 * Fetches the next opcode dword.
2838 *
2839 * @returns Strict VBox status code.
2840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2841 * @param pu32 Where to return the opcode double word.
2842 */
2843DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2844{
2845 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2846 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2847 {
2848 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2849# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2850 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2851# else
2852 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2853 pVCpu->iem.s.abOpcode[offOpcode + 1],
2854 pVCpu->iem.s.abOpcode[offOpcode + 2],
2855 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2856# endif
2857 return VINF_SUCCESS;
2858 }
2859 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2860}
2861
 2862#else /* IEM_WITH_SETJMP */
2863
2864/**
2865 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2866 *
2867 * @returns The opcode dword.
2868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2869 */
2870DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2871{
2872# ifdef IEM_WITH_CODE_TLB
2873 uint32_t u32;
2874 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2875 return u32;
2876# else
2877 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2878 if (rcStrict == VINF_SUCCESS)
2879 {
2880 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2881 pVCpu->iem.s.offOpcode = offOpcode + 4;
2882# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2883 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2884# else
2885 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2886 pVCpu->iem.s.abOpcode[offOpcode + 1],
2887 pVCpu->iem.s.abOpcode[offOpcode + 2],
2888 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2889# endif
2890 }
2891 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2892# endif
2893}
2894
2895
2896/**
2897 * Fetches the next opcode dword, longjmp on error.
2898 *
2899 * @returns The opcode dword.
2900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2901 */
2902DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2903{
2904# ifdef IEM_WITH_CODE_TLB
2905 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2906 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2907 if (RT_LIKELY( pbBuf != NULL
2908 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2909 {
2910 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2911# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2912 return *(uint32_t const *)&pbBuf[offBuf];
2913# else
2914 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2915 pbBuf[offBuf + 1],
2916 pbBuf[offBuf + 2],
2917 pbBuf[offBuf + 3]);
2918# endif
2919 }
2920# else
2921 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2922 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2923 {
2924 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2925# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2926 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2927# else
2928 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2929 pVCpu->iem.s.abOpcode[offOpcode + 1],
2930 pVCpu->iem.s.abOpcode[offOpcode + 2],
2931 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2932# endif
2933 }
2934# endif
2935 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2936}
2937
2938#endif /* !IEM_WITH_SETJMP */
2939
2940
2941/**
2942 * Fetches the next opcode dword, returns automatically on failure.
2943 *
2944 * @param a_pu32 Where to return the opcode dword.
2945 * @remark Implicitly references pVCpu.
2946 */
2947#ifndef IEM_WITH_SETJMP
2948# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2949 do \
2950 { \
2951 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2952 if (rcStrict2 != VINF_SUCCESS) \
2953 return rcStrict2; \
2954 } while (0)
2955#else
2956# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2957#endif
2958
2959#ifndef IEM_WITH_SETJMP
2960
2961/**
2962 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2963 *
2964 * @returns Strict VBox status code.
2965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 2966 * @param pu64 Where to return the opcode dword, zero extended to a qword.
2967 */
2968DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2969{
2970 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2971 if (rcStrict == VINF_SUCCESS)
2972 {
2973 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2974 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2975 pVCpu->iem.s.abOpcode[offOpcode + 1],
2976 pVCpu->iem.s.abOpcode[offOpcode + 2],
2977 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2978 pVCpu->iem.s.offOpcode = offOpcode + 4;
2979 }
2980 else
2981 *pu64 = 0;
2982 return rcStrict;
2983}
2984
2985
2986/**
2987 * Fetches the next opcode dword, zero extending it to a quad word.
2988 *
2989 * @returns Strict VBox status code.
2990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2991 * @param pu64 Where to return the opcode quad word.
2992 */
2993DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2994{
2995 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2996 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2997 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2998
2999 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3000 pVCpu->iem.s.abOpcode[offOpcode + 1],
3001 pVCpu->iem.s.abOpcode[offOpcode + 2],
3002 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3003 pVCpu->iem.s.offOpcode = offOpcode + 4;
3004 return VINF_SUCCESS;
3005}
3006
3007#endif /* !IEM_WITH_SETJMP */
3008
3009
3010/**
3011 * Fetches the next opcode dword and zero extends it to a quad word, returns
3012 * automatically on failure.
3013 *
3014 * @param a_pu64 Where to return the opcode quad word.
3015 * @remark Implicitly references pVCpu.
3016 */
3017#ifndef IEM_WITH_SETJMP
3018# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3019 do \
3020 { \
3021 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3022 if (rcStrict2 != VINF_SUCCESS) \
3023 return rcStrict2; \
3024 } while (0)
3025#else
3026# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3027#endif
3028
3029
3030#ifndef IEM_WITH_SETJMP
3031/**
3032 * Fetches the next signed double word from the opcode stream.
3033 *
3034 * @returns Strict VBox status code.
3035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3036 * @param pi32 Where to return the signed double word.
3037 */
3038DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3039{
3040 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3041}
3042#endif
3043
3044/**
3045 * Fetches the next signed double word from the opcode stream, returning
3046 * automatically on failure.
3047 *
3048 * @param a_pi32 Where to return the signed double word.
3049 * @remark Implicitly references pVCpu.
3050 */
3051#ifndef IEM_WITH_SETJMP
3052# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3053 do \
3054 { \
3055 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3056 if (rcStrict2 != VINF_SUCCESS) \
3057 return rcStrict2; \
3058 } while (0)
3059#else
3060# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3061#endif
3062
3063#ifndef IEM_WITH_SETJMP
3064
3065/**
3066 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3067 *
3068 * @returns Strict VBox status code.
3069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3070 * @param pu64 Where to return the opcode qword.
3071 */
3072DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3073{
3074 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3075 if (rcStrict == VINF_SUCCESS)
3076 {
3077 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3078 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3079 pVCpu->iem.s.abOpcode[offOpcode + 1],
3080 pVCpu->iem.s.abOpcode[offOpcode + 2],
3081 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3082 pVCpu->iem.s.offOpcode = offOpcode + 4;
3083 }
3084 else
3085 *pu64 = 0;
3086 return rcStrict;
3087}
3088
3089
3090/**
3091 * Fetches the next opcode dword, sign extending it into a quad word.
3092 *
3093 * @returns Strict VBox status code.
3094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3095 * @param pu64 Where to return the opcode quad word.
3096 */
3097DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3098{
3099 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3100 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3101 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3102
3103 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3104 pVCpu->iem.s.abOpcode[offOpcode + 1],
3105 pVCpu->iem.s.abOpcode[offOpcode + 2],
3106 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3107 *pu64 = i32;
3108 pVCpu->iem.s.offOpcode = offOpcode + 4;
3109 return VINF_SUCCESS;
3110}
3111
3112#endif /* !IEM_WITH_SETJMP */
3113
3114
3115/**
3116 * Fetches the next opcode double word and sign extends it to a quad word,
3117 * returns automatically on failure.
3118 *
3119 * @param a_pu64 Where to return the opcode quad word.
3120 * @remark Implicitly references pVCpu.
3121 */
3122#ifndef IEM_WITH_SETJMP
3123# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3124 do \
3125 { \
3126 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3127 if (rcStrict2 != VINF_SUCCESS) \
3128 return rcStrict2; \
3129 } while (0)
3130#else
3131# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3132#endif
3133
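/*
 * For illustration: with the opcode bytes 00 00 00 80 (the dword 0x80000000),
 * the zero-extending fetch above stores 0x0000000080000000 into the quad word,
 * while the sign-extending fetch below stores 0xffffffff80000000.
 */
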
3134#ifndef IEM_WITH_SETJMP
3135
3136/**
3137 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3138 *
3139 * @returns Strict VBox status code.
3140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3141 * @param pu64 Where to return the opcode qword.
3142 */
3143DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3144{
3145 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3146 if (rcStrict == VINF_SUCCESS)
3147 {
3148 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3149# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3150 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3151# else
3152 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3153 pVCpu->iem.s.abOpcode[offOpcode + 1],
3154 pVCpu->iem.s.abOpcode[offOpcode + 2],
3155 pVCpu->iem.s.abOpcode[offOpcode + 3],
3156 pVCpu->iem.s.abOpcode[offOpcode + 4],
3157 pVCpu->iem.s.abOpcode[offOpcode + 5],
3158 pVCpu->iem.s.abOpcode[offOpcode + 6],
3159 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3160# endif
3161 pVCpu->iem.s.offOpcode = offOpcode + 8;
3162 }
3163 else
3164 *pu64 = 0;
3165 return rcStrict;
3166}
3167
3168
3169/**
3170 * Fetches the next opcode qword.
3171 *
3172 * @returns Strict VBox status code.
3173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3174 * @param pu64 Where to return the opcode qword.
3175 */
3176DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3177{
3178 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3179 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3180 {
3181# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3182 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3183# else
3184 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3185 pVCpu->iem.s.abOpcode[offOpcode + 1],
3186 pVCpu->iem.s.abOpcode[offOpcode + 2],
3187 pVCpu->iem.s.abOpcode[offOpcode + 3],
3188 pVCpu->iem.s.abOpcode[offOpcode + 4],
3189 pVCpu->iem.s.abOpcode[offOpcode + 5],
3190 pVCpu->iem.s.abOpcode[offOpcode + 6],
3191 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3192# endif
3193 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3194 return VINF_SUCCESS;
3195 }
3196 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3197}
3198
3199#else /* IEM_WITH_SETJMP */
3200
3201/**
3202 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3203 *
3204 * @returns The opcode qword.
3205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3206 */
3207DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3208{
3209# ifdef IEM_WITH_CODE_TLB
3210 uint64_t u64;
3211 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3212 return u64;
3213# else
3214 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3215 if (rcStrict == VINF_SUCCESS)
3216 {
3217 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3218 pVCpu->iem.s.offOpcode = offOpcode + 8;
3219# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3220 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3221# else
3222 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3223 pVCpu->iem.s.abOpcode[offOpcode + 1],
3224 pVCpu->iem.s.abOpcode[offOpcode + 2],
3225 pVCpu->iem.s.abOpcode[offOpcode + 3],
3226 pVCpu->iem.s.abOpcode[offOpcode + 4],
3227 pVCpu->iem.s.abOpcode[offOpcode + 5],
3228 pVCpu->iem.s.abOpcode[offOpcode + 6],
3229 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3230# endif
3231 }
3232 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3233# endif
3234}
3235
3236
3237/**
3238 * Fetches the next opcode qword, longjmp on error.
3239 *
3240 * @returns The opcode qword.
3241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3242 */
3243DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3244{
3245# ifdef IEM_WITH_CODE_TLB
3246 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3247 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3248 if (RT_LIKELY( pbBuf != NULL
3249 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3250 {
3251 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3252# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3253 return *(uint64_t const *)&pbBuf[offBuf];
3254# else
3255 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3256 pbBuf[offBuf + 1],
3257 pbBuf[offBuf + 2],
3258 pbBuf[offBuf + 3],
3259 pbBuf[offBuf + 4],
3260 pbBuf[offBuf + 5],
3261 pbBuf[offBuf + 6],
3262 pbBuf[offBuf + 7]);
3263# endif
3264 }
3265# else
3266 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3267 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3268 {
3269 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3270# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3271 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3272# else
3273 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3274 pVCpu->iem.s.abOpcode[offOpcode + 1],
3275 pVCpu->iem.s.abOpcode[offOpcode + 2],
3276 pVCpu->iem.s.abOpcode[offOpcode + 3],
3277 pVCpu->iem.s.abOpcode[offOpcode + 4],
3278 pVCpu->iem.s.abOpcode[offOpcode + 5],
3279 pVCpu->iem.s.abOpcode[offOpcode + 6],
3280 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3281# endif
3282 }
3283# endif
3284 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3285}
3286
3287#endif /* IEM_WITH_SETJMP */
3288
3289/**
3290 * Fetches the next opcode quad word, returns automatically on failure.
3291 *
3292 * @param a_pu64 Where to return the opcode quad word.
3293 * @remark Implicitly references pVCpu.
3294 */
3295#ifndef IEM_WITH_SETJMP
3296# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3297 do \
3298 { \
3299 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3300 if (rcStrict2 != VINF_SUCCESS) \
3301 return rcStrict2; \
3302 } while (0)
3303#else
3304# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3305#endif
3306
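/*
 * Usage sketch (hypothetical handler, assuming the FNIEMOP_DEF convention used
 * by the instruction decoders): fetching a 64-bit immediate.  In the non-setjmp
 * build the macro returns the strict status code to the caller on a fetch
 * failure; in the setjmp build iemOpcodeGetNextU64Jmp longjmps instead.
 *
 *     FNIEMOP_DEF(iemOp_Hypothetical_Iq)
 *     {
 *         uint64_t u64Imm;
 *         IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 *         // ... decode/execute using u64Imm ...
 *     }
 */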
3307
3308/** @name Misc Worker Functions.
3309 * @{
3310 */
3311
3312/**
3313 * Gets the exception class for the specified exception vector.
3314 *
3315 * @returns The class of the specified exception.
3316 * @param uVector The exception vector.
3317 */
3318IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3319{
3320 Assert(uVector <= X86_XCPT_LAST);
3321 switch (uVector)
3322 {
3323 case X86_XCPT_DE:
3324 case X86_XCPT_TS:
3325 case X86_XCPT_NP:
3326 case X86_XCPT_SS:
3327 case X86_XCPT_GP:
3328 case X86_XCPT_SX: /* AMD only */
3329 return IEMXCPTCLASS_CONTRIBUTORY;
3330
3331 case X86_XCPT_PF:
3332 case X86_XCPT_VE: /* Intel only */
3333 return IEMXCPTCLASS_PAGE_FAULT;
3334
3335 case X86_XCPT_DF:
3336 return IEMXCPTCLASS_DOUBLE_FAULT;
3337 }
3338 return IEMXCPTCLASS_BENIGN;
3339}
3340
3341
3342/**
3343 * Evaluates how to handle an exception caused during delivery of another event
3344 * (exception / interrupt).
3345 *
3346 * @returns How to handle the recursive exception.
3347 * @param pVCpu The cross context virtual CPU structure of the
3348 * calling thread.
3349 * @param fPrevFlags The flags of the previous event.
3350 * @param uPrevVector The vector of the previous event.
3351 * @param fCurFlags The flags of the current exception.
3352 * @param uCurVector The vector of the current exception.
3353 * @param pfXcptRaiseInfo Where to store additional information about the
3354 * exception condition. Optional.
3355 */
3356VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3357 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3358{
3359 /*
3360 * Only CPU exceptions can be raised while delivering other events; software interrupt
3361 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3362 */
3363 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3364 Assert(pVCpu); RT_NOREF(pVCpu);
3365 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3366
3367 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3368 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3369 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3370 {
3371 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3372 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3373 {
3374 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3375 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3376 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3377 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3378 {
3379 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3380 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3381 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3382 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3383 uCurVector, pVCpu->cpum.GstCtx.cr2));
3384 }
3385 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3386 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3387 {
3388 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3389 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3390 }
3391 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3392 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3393 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3394 {
3395 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3396 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3397 }
3398 }
3399 else
3400 {
3401 if (uPrevVector == X86_XCPT_NMI)
3402 {
3403 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3404 if (uCurVector == X86_XCPT_PF)
3405 {
3406 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3407 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3408 }
3409 }
3410 else if ( uPrevVector == X86_XCPT_AC
3411 && uCurVector == X86_XCPT_AC)
3412 {
3413 enmRaise = IEMXCPTRAISE_CPU_HANG;
3414 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3415 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3416 }
3417 }
3418 }
3419 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3420 {
3421 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3422 if (uCurVector == X86_XCPT_PF)
3423 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3424 }
3425 else
3426 {
3427 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3428 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3429 }
3430
3431 if (pfXcptRaiseInfo)
3432 *pfXcptRaiseInfo = fRaiseInfo;
3433 return enmRaise;
3434}
3435
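/*
 * Worked example (hypothetical caller): a #GP raised while delivering a #PF is
 * a page-fault class event followed by a contributory one, so the logic above
 * yields a double fault:
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      &fRaiseInfo);
 *     // enmRaise == IEMXCPTRAISE_DOUBLE_FAULT,
 *     // fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT.
 */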
3436
3437/**
3438 * Enters the CPU shutdown state initiated by a triple fault or other
3439 * unrecoverable conditions.
3440 *
3441 * @returns Strict VBox status code.
3442 * @param pVCpu The cross context virtual CPU structure of the
3443 * calling thread.
3444 */
3445IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3446{
3447 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3448 {
3449 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3450 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3451 }
3452
3453 RT_NOREF(pVCpu);
3454 return VINF_EM_TRIPLE_FAULT;
3455}
3456
3457
3458/**
3459 * Validates a new SS segment.
3460 *
3461 * @returns VBox strict status code.
3462 * @param pVCpu The cross context virtual CPU structure of the
3463 * calling thread.
3464 * @param NewSS The new SS selector.
3465 * @param uCpl The CPL to load the stack for.
3466 * @param pDesc Where to return the descriptor.
3467 */
3468IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3469{
3470 /* Null selectors are not allowed (we're not called for dispatching
3471 interrupts with SS=0 in long mode). */
3472 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3473 {
3474 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3475 return iemRaiseTaskSwitchFault0(pVCpu);
3476 }
3477
3478 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3479 if ((NewSS & X86_SEL_RPL) != uCpl)
3480 {
3481 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3482 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3483 }
3484
3485 /*
3486 * Read the descriptor.
3487 */
3488 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3489 if (rcStrict != VINF_SUCCESS)
3490 return rcStrict;
3491
3492 /*
3493 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3494 */
3495 if (!pDesc->Legacy.Gen.u1DescType)
3496 {
3497 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3498 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3499 }
3500
3501 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3502 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3503 {
3504 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3505 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3506 }
3507 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3508 {
3509 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3510 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3511 }
3512
3513 /* Is it there? */
3514 /** @todo testcase: Is this checked before the canonical / limit check below? */
3515 if (!pDesc->Legacy.Gen.u1Present)
3516 {
3517 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3518 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3519 }
3520
3521 return VINF_SUCCESS;
3522}
3523
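/*
 * Usage sketch (hypothetical caller): validate a new stack selector before
 * committing it, letting the helper raise #TS/#NP as appropriate.
 *
 *     IEMSELDESC   DescSS;
 *     VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, uNewSS, pVCpu->iem.s.uCpl, &DescSS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... load the hidden SS parts from DescSS ...
 */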
3524
3525/**
3526 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3527 * not.
3528 *
3529 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3530 */
3531#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3532# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3533#else
3534# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3535#endif
3536
3537/**
3538 * Updates the EFLAGS in the correct manner wrt. PATM.
3539 *
3540 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3541 * @param a_fEfl The new EFLAGS.
3542 */
3543#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3544# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3545#else
3546# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3547#endif
3548
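/*
 * Usage sketch (mirrors how the real-mode interrupt code further down uses
 * these macros): EFLAGS is read and written through them so raw-mode (PATM)
 * configurations see a consistent value.
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pVCpu, fEfl);
 */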
3549
3550/** @} */
3551
3552/** @name Raising Exceptions.
3553 *
3554 * @{
3555 */
3556
3557
3558/**
3559 * Loads the specified stack far pointer from the TSS.
3560 *
3561 * @returns VBox strict status code.
3562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3563 * @param uCpl The CPL to load the stack for.
3564 * @param pSelSS Where to return the new stack segment.
3565 * @param puEsp Where to return the new stack pointer.
3566 */
3567IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3568{
3569 VBOXSTRICTRC rcStrict;
3570 Assert(uCpl < 4);
3571
3572 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3573 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3574 {
3575 /*
3576 * 16-bit TSS (X86TSS16).
3577 */
3578 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3579 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3580 {
3581 uint32_t off = uCpl * 4 + 2;
3582 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3583 {
3584 /** @todo check actual access pattern here. */
3585 uint32_t u32Tmp = 0; /* zero init to quiet a possible gcc maybe-uninitialized warning */
3586 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3587 if (rcStrict == VINF_SUCCESS)
3588 {
3589 *puEsp = RT_LOWORD(u32Tmp);
3590 *pSelSS = RT_HIWORD(u32Tmp);
3591 return VINF_SUCCESS;
3592 }
3593 }
3594 else
3595 {
3596 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3597 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3598 }
3599 break;
3600 }
3601
3602 /*
3603 * 32-bit TSS (X86TSS32).
3604 */
3605 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3606 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3607 {
3608 uint32_t off = uCpl * 8 + 4;
3609 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3610 {
3611/** @todo check actual access pattern here. */
3612 uint64_t u64Tmp;
3613 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3614 if (rcStrict == VINF_SUCCESS)
3615 {
3616 *puEsp = u64Tmp & UINT32_MAX;
3617 *pSelSS = (RTSEL)(u64Tmp >> 32);
3618 return VINF_SUCCESS;
3619 }
3620 }
3621 else
3622 {
3623 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3624 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3625 }
3626 break;
3627 }
3628
3629 default:
3630 AssertFailed();
3631 rcStrict = VERR_IEM_IPE_4;
3632 break;
3633 }
3634
3635 *puEsp = 0; /* make gcc happy */
3636 *pSelSS = 0; /* make gcc happy */
3637 return rcStrict;
3638}
3639
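/*
 * For illustration, the offset arithmetic above follows the architectural TSS
 * layouts: in a 32-bit TSS the ring-N ESP/SS pair lives at offset 4 + N*8
 * (e.g. uCpl=1 reads 8 bytes at 0x0c, ESP1 in the low dword, SS1 in the next
 * word), while in a 16-bit TSS the ring-N SP/SS pair lives at offset 2 + N*4
 * (e.g. uCpl=1 reads 4 bytes at 6, SP1 in the low word, SS1 in the high word).
 */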
3640
3641/**
3642 * Loads the specified stack pointer from the 64-bit TSS.
3643 *
3644 * @returns VBox strict status code.
3645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3646 * @param uCpl The CPL to load the stack for.
3647 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3648 * @param puRsp Where to return the new stack pointer.
3649 */
3650IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3651{
3652 Assert(uCpl < 4);
3653 Assert(uIst < 8);
3654 *puRsp = 0; /* make gcc happy */
3655
3656 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3657 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3658
3659 uint32_t off;
3660 if (uIst)
3661 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3662 else
3663 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3664 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3665 {
3666 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3667 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3668 }
3669
3670 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3671}
3672
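/*
 * For illustration, with the canonical 64-bit TSS layout (rsp0 at offset 4,
 * ist1 at offset 36): uCpl=2 with uIst=0 reads the 8 bytes at 0x14 (RSP2),
 * while uIst=3 reads the 8 bytes at 0x34 (IST3).
 */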
3673
3674/**
3675 * Adjust the CPU state according to the exception being raised.
3676 *
3677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3678 * @param u8Vector The exception that has been raised.
3679 */
3680DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3681{
3682 switch (u8Vector)
3683 {
3684 case X86_XCPT_DB:
3685 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3686 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3687 break;
3688 /** @todo Read the AMD and Intel exception reference... */
3689 }
3690}
3691
3692
3693/**
3694 * Implements exceptions and interrupts for real mode.
3695 *
3696 * @returns VBox strict status code.
3697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3698 * @param cbInstr The number of bytes to offset rIP by in the return
3699 * address.
3700 * @param u8Vector The interrupt / exception vector number.
3701 * @param fFlags The flags.
3702 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3703 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3704 */
3705IEM_STATIC VBOXSTRICTRC
3706iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3707 uint8_t cbInstr,
3708 uint8_t u8Vector,
3709 uint32_t fFlags,
3710 uint16_t uErr,
3711 uint64_t uCr2)
3712{
3713 NOREF(uErr); NOREF(uCr2);
3714 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3715
3716 /*
3717 * Read the IDT entry.
3718 */
3719 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3720 {
3721 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3722 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3723 }
3724 RTFAR16 Idte;
3725 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3726 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3727 {
3728 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3729 return rcStrict;
3730 }
3731
3732 /*
3733 * Push the stack frame.
3734 */
3735 uint16_t *pu16Frame;
3736 uint64_t uNewRsp;
3737 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3738 if (rcStrict != VINF_SUCCESS)
3739 return rcStrict;
3740
3741 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3742#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3743 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3744 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3745 fEfl |= UINT16_C(0xf000);
3746#endif
3747 pu16Frame[2] = (uint16_t)fEfl;
3748 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3749 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3750 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3751 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3752 return rcStrict;
3753
3754 /*
3755 * Load the vector address into cs:ip and make exception specific state
3756 * adjustments.
3757 */
3758 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3759 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3760 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3761 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3762 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3763 pVCpu->cpum.GstCtx.rip = Idte.off;
3764 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3765 IEMMISC_SET_EFL(pVCpu, fEfl);
3766
3767 /** @todo do we actually do this in real mode? */
3768 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3769 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3770
3771 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3772}
3773
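/*
 * For illustration: in real mode the IVT entry for vector N is the 4-byte
 * offset:segment pair at idtr.pIdt + 4*N (e.g. vector 0x10 is read at +0x40),
 * and the 6-byte frame pushed above leaves IP at the new SP, CS at SP+2 and
 * FLAGS at SP+4, which is exactly what IRET expects to pop.
 */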
3774
3775/**
3776 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3777 *
3778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3779 * @param pSReg Pointer to the segment register.
3780 */
3781IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3782{
3783 pSReg->Sel = 0;
3784 pSReg->ValidSel = 0;
3785 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3786 {
3787 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
3788 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3789 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3790 }
3791 else
3792 {
3793 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3794 /** @todo check this on AMD-V */
3795 pSReg->u64Base = 0;
3796 pSReg->u32Limit = 0;
3797 }
3798}
3799
3800
3801/**
3802 * Loads a segment selector during a task switch in V8086 mode.
3803 *
3804 * @param pSReg Pointer to the segment register.
3805 * @param uSel The selector value to load.
3806 */
3807IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3808{
3809 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3810 pSReg->Sel = uSel;
3811 pSReg->ValidSel = uSel;
3812 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3813 pSReg->u64Base = uSel << 4;
3814 pSReg->u32Limit = 0xffff;
3815 pSReg->Attr.u = 0xf3;
3816}
3817
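/*
 * For illustration: as in real mode, the hidden base is simply the selector
 * shifted left by four (e.g. uSel=0xb800 gives u64Base=0xb8000), and the
 * attribute value 0xf3 decodes to a present, DPL-3, accessed read/write data
 * segment.
 */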
3818
3819/**
3820 * Loads a NULL data selector into a selector register, both the hidden and
3821 * visible parts, in protected mode.
3822 *
3823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3824 * @param pSReg Pointer to the segment register.
3825 * @param uRpl The RPL.
3826 */
3827IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3828{
3829 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3830 * data selector in protected mode. */
3831 pSReg->Sel = uRpl;
3832 pSReg->ValidSel = uRpl;
3833 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3834 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3835 {
3836 /* VT-x (Intel 3960x) observed doing something like this. */
3837 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3838 pSReg->u32Limit = UINT32_MAX;
3839 pSReg->u64Base = 0;
3840 }
3841 else
3842 {
3843 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3844 pSReg->u32Limit = 0;
3845 pSReg->u64Base = 0;
3846 }
3847}
3848
3849
3850/**
3851 * Loads a segment selector during a task switch in protected mode.
3852 *
3853 * In this task switch scenario, we would throw \#TS exceptions rather than
3854 * \#GPs.
3855 *
3856 * @returns VBox strict status code.
3857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3858 * @param pSReg Pointer to the segment register.
3859 * @param uSel The new selector value.
3860 *
3861 * @remarks This does _not_ handle CS or SS.
3862 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3863 */
3864IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3865{
3866 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3867
3868 /* Null data selector. */
3869 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3870 {
3871 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3872 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3873 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3874 return VINF_SUCCESS;
3875 }
3876
3877 /* Fetch the descriptor. */
3878 IEMSELDESC Desc;
3879 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3880 if (rcStrict != VINF_SUCCESS)
3881 {
3882 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3883 VBOXSTRICTRC_VAL(rcStrict)));
3884 return rcStrict;
3885 }
3886
3887 /* Must be a data segment or readable code segment. */
3888 if ( !Desc.Legacy.Gen.u1DescType
3889 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3890 {
3891 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3892 Desc.Legacy.Gen.u4Type));
3893 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3894 }
3895
3896 /* Check privileges for data segments and non-conforming code segments. */
3897 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3898 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3899 {
3900 /* The RPL and the new CPL must be less than or equal to the DPL. */
3901 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3902 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3903 {
3904 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3905 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3906 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3907 }
3908 }
3909
3910 /* Is it there? */
3911 if (!Desc.Legacy.Gen.u1Present)
3912 {
3913 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3914 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3915 }
3916
3917 /* The base and limit. */
3918 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3919 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3920
3921 /*
3922 * Ok, everything checked out fine. Now set the accessed bit before
3923 * committing the result into the registers.
3924 */
3925 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3926 {
3927 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3928 if (rcStrict != VINF_SUCCESS)
3929 return rcStrict;
3930 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3931 }
3932
3933 /* Commit */
3934 pSReg->Sel = uSel;
3935 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3936 pSReg->u32Limit = cbLimit;
3937 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3938 pSReg->ValidSel = uSel;
3939 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3940 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3941 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3942
3943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3944 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3945 return VINF_SUCCESS;
3946}
3947
3948
3949/**
3950 * Performs a task switch.
3951 *
3952 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3953 * caller is responsible for performing the necessary checks (like DPL, TSS
3954 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3955 * reference for JMP, CALL, IRET.
3956 *
3957 * If the task switch is due to a software interrupt or hardware exception,
3958 * the caller is responsible for validating the TSS selector and descriptor. See
3959 * Intel Instruction reference for INT n.
3960 *
3961 * @returns VBox strict status code.
3962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3963 * @param enmTaskSwitch What caused this task switch.
3964 * @param uNextEip The EIP effective after the task switch.
3965 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3966 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3967 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3968 * @param SelTSS The TSS selector of the new task.
3969 * @param pNewDescTSS Pointer to the new TSS descriptor.
3970 */
3971IEM_STATIC VBOXSTRICTRC
3972iemTaskSwitch(PVMCPU pVCpu,
3973 IEMTASKSWITCH enmTaskSwitch,
3974 uint32_t uNextEip,
3975 uint32_t fFlags,
3976 uint16_t uErr,
3977 uint64_t uCr2,
3978 RTSEL SelTSS,
3979 PIEMSELDESC pNewDescTSS)
3980{
3981 Assert(!IEM_IS_REAL_MODE(pVCpu));
3982 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3983 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3984
3985 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3986 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3987 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3988 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3989 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3990
3991 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3992 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3993
3994 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3995 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3996
3997 /* Update CR2 in case it's a page-fault. */
3998 /** @todo This should probably be done much earlier in IEM/PGM. See
3999 * @bugref{5653#c49}. */
4000 if (fFlags & IEM_XCPT_FLAGS_CR2)
4001 pVCpu->cpum.GstCtx.cr2 = uCr2;
4002
4003 /*
4004 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4005 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4006 */
4007 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4008 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4009 if (uNewTSSLimit < uNewTSSLimitMin)
4010 {
4011 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4012 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4013 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4014 }
4015
4016 /*
4017 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4018 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4019 */
4020 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4021 {
4022 uint32_t const uExitInfo1 = SelTSS;
4023 uint32_t uExitInfo2 = uErr;
4024 switch (enmTaskSwitch)
4025 {
4026 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4027 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4028 default: break;
4029 }
4030 if (fFlags & IEM_XCPT_FLAGS_ERR)
4031 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4032 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4033 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4034
4035 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4036 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4037 RT_NOREF2(uExitInfo1, uExitInfo2);
4038 }
4039 /** @todo Nested-VMX task-switch intercept. */
4040
4041 /*
4042 * Check the current TSS limit. The last written byte to the current TSS during the
4043 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4044 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4045 *
4046 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4047 * end up with smaller than "legal" TSS limits.
4048 */
4049 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4050 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4051 if (uCurTSSLimit < uCurTSSLimitMin)
4052 {
4053 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4054 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4055 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4056 }
4057
4058 /*
4059 * Verify that the new TSS can be accessed and map it. Map only the required contents
4060 * and not the entire TSS.
4061 */
4062 void *pvNewTSS;
4063 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4064 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4065 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4066 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4067 * not perform correct translation if this happens. See Intel spec. 7.2.1
4068 * "Task-State Segment" */
4069 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4070 if (rcStrict != VINF_SUCCESS)
4071 {
4072 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4073 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4074 return rcStrict;
4075 }
4076
4077 /*
4078 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4079 */
4080 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4081 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4082 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4083 {
4084 PX86DESC pDescCurTSS;
4085 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4086 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4087 if (rcStrict != VINF_SUCCESS)
4088 {
4089 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4090 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4091 return rcStrict;
4092 }
4093
4094 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4095 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4096 if (rcStrict != VINF_SUCCESS)
4097 {
4098 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4099 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4100 return rcStrict;
4101 }
4102
4103 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4104 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4105 {
4106 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4107 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4108 u32EFlags &= ~X86_EFL_NT;
4109 }
4110 }
4111
4112 /*
4113 * Save the CPU state into the current TSS.
4114 */
4115 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4116 if (GCPtrNewTSS == GCPtrCurTSS)
4117 {
4118 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4119 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4120 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ldtr.Sel));
4121 }
4122 if (fIsNewTSS386)
4123 {
4124 /*
4125 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4126 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4127 */
4128 void *pvCurTSS32;
4129 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4130 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4131 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4132 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4133 if (rcStrict != VINF_SUCCESS)
4134 {
4135 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4136 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4137 return rcStrict;
4138 }
4139
4140 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4141 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4142 pCurTSS32->eip = uNextEip;
4143 pCurTSS32->eflags = u32EFlags;
4144 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4145 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4146 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4147 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4148 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4149 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4150 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4151 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4152 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4153 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4154 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4155 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4156 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4157 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4158
4159 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4160 if (rcStrict != VINF_SUCCESS)
4161 {
4162 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4163 VBOXSTRICTRC_VAL(rcStrict)));
4164 return rcStrict;
4165 }
4166 }
4167 else
4168 {
4169 /*
4170 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4171 */
4172 void *pvCurTSS16;
4173 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4174 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4175 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4176 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4177 if (rcStrict != VINF_SUCCESS)
4178 {
4179 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4180 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4181 return rcStrict;
4182 }
4183
4184 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4185 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4186 pCurTSS16->ip = uNextEip;
4187 pCurTSS16->flags = u32EFlags;
4188 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4189 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4190 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4191 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4192 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4193 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4194 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4195 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4196 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4197 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4198 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4199 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4200
4201 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4202 if (rcStrict != VINF_SUCCESS)
4203 {
4204 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4205 VBOXSTRICTRC_VAL(rcStrict)));
4206 return rcStrict;
4207 }
4208 }
4209
4210 /*
4211 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4212 */
4213 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4214 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4215 {
4216 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4217 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4218 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4219 }
4220
4221 /*
4222 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4223 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4224 */
4225 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4226 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4227 bool fNewDebugTrap;
4228 if (fIsNewTSS386)
4229 {
4230 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4231 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4232 uNewEip = pNewTSS32->eip;
4233 uNewEflags = pNewTSS32->eflags;
4234 uNewEax = pNewTSS32->eax;
4235 uNewEcx = pNewTSS32->ecx;
4236 uNewEdx = pNewTSS32->edx;
4237 uNewEbx = pNewTSS32->ebx;
4238 uNewEsp = pNewTSS32->esp;
4239 uNewEbp = pNewTSS32->ebp;
4240 uNewEsi = pNewTSS32->esi;
4241 uNewEdi = pNewTSS32->edi;
4242 uNewES = pNewTSS32->es;
4243 uNewCS = pNewTSS32->cs;
4244 uNewSS = pNewTSS32->ss;
4245 uNewDS = pNewTSS32->ds;
4246 uNewFS = pNewTSS32->fs;
4247 uNewGS = pNewTSS32->gs;
4248 uNewLdt = pNewTSS32->selLdt;
4249 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4250 }
4251 else
4252 {
4253 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4254 uNewCr3 = 0;
4255 uNewEip = pNewTSS16->ip;
4256 uNewEflags = pNewTSS16->flags;
4257 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4258 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4259 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4260 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4261 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4262 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4263 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4264 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4265 uNewES = pNewTSS16->es;
4266 uNewCS = pNewTSS16->cs;
4267 uNewSS = pNewTSS16->ss;
4268 uNewDS = pNewTSS16->ds;
4269 uNewFS = 0;
4270 uNewGS = 0;
4271 uNewLdt = pNewTSS16->selLdt;
4272 fNewDebugTrap = false;
4273 }
4274
4275 if (GCPtrNewTSS == GCPtrCurTSS)
4276 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4277 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4278
4279 /*
4280 * We're done accessing the new TSS.
4281 */
4282 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4283 if (rcStrict != VINF_SUCCESS)
4284 {
4285 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4286 return rcStrict;
4287 }
4288
4289 /*
4290 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4291 */
4292 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4293 {
4294 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4295 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4296 if (rcStrict != VINF_SUCCESS)
4297 {
4298 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4299 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4300 return rcStrict;
4301 }
4302
4303 /* Check that the descriptor indicates the new TSS is available (not busy). */
4304 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4305 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4306 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4307
4308 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4309 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4310 if (rcStrict != VINF_SUCCESS)
4311 {
4312 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4313 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4314 return rcStrict;
4315 }
4316 }
4317
4318 /*
4319 * From this point on, we're technically in the new task. We will defer exceptions
4320 * until the completion of the task switch but before executing any instructions in the new task.
4321 */
4322 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4323 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4324 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4325 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4326 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4327 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4328 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4329
4330 /* Set the busy bit in TR. */
4331 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4332 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4333 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4334 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4335 {
4336 uNewEflags |= X86_EFL_NT;
4337 }
4338
4339 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4340 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4341 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4342
4343 pVCpu->cpum.GstCtx.eip = uNewEip;
4344 pVCpu->cpum.GstCtx.eax = uNewEax;
4345 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4346 pVCpu->cpum.GstCtx.edx = uNewEdx;
4347 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4348 pVCpu->cpum.GstCtx.esp = uNewEsp;
4349 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4350 pVCpu->cpum.GstCtx.esi = uNewEsi;
4351 pVCpu->cpum.GstCtx.edi = uNewEdi;
4352
4353 uNewEflags &= X86_EFL_LIVE_MASK;
4354 uNewEflags |= X86_EFL_RA1_MASK;
4355 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4356
4357 /*
4358 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4359 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4360 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4361 */
4362 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4363 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4364
4365 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4366 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4367
4368 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4369 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4370
4371 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4372 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4373
4374 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4375 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4376
4377 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4378 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4379 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4380
4381 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4382 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4383 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4384 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4385
4386 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4387 {
4388 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4389 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4390 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4391 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4392 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4393 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4394 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4395 }
4396
4397 /*
4398 * Switch CR3 for the new task.
4399 */
4400 if ( fIsNewTSS386
4401 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4402 {
4403 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4404 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4405 AssertRCSuccessReturn(rc, rc);
4406
4407 /* Inform PGM. */
4408 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4409 AssertRCReturn(rc, rc);
4410 /* ignore informational status codes */
4411
4412 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4413 }
4414
4415 /*
4416 * Switch LDTR for the new task.
4417 */
4418 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4419 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4420 else
4421 {
4422 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4423
4424 IEMSELDESC DescNewLdt;
4425 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4426 if (rcStrict != VINF_SUCCESS)
4427 {
4428 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4429 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4430 return rcStrict;
4431 }
4432 if ( !DescNewLdt.Legacy.Gen.u1Present
4433 || DescNewLdt.Legacy.Gen.u1DescType
4434 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4435 {
4436 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4437 uNewLdt, DescNewLdt.Legacy.u));
4438 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4439 }
4440
4441 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4442 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4443 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4444 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4445 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4446 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4447 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4448 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4449 }
4450
4451 IEMSELDESC DescSS;
4452 if (IEM_IS_V86_MODE(pVCpu))
4453 {
4454 pVCpu->iem.s.uCpl = 3;
4455 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4456 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4457 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4458 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4459 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4460 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4461
4462 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4463 DescSS.Legacy.u = 0;
4464 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4465 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4466 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4467 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4468 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4469 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4470 DescSS.Legacy.Gen.u2Dpl = 3;
4471 }
4472 else
4473 {
4474 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4475
4476 /*
4477 * Load the stack segment for the new task.
4478 */
4479 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4480 {
4481 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4482 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4483 }
4484
4485 /* Fetch the descriptor. */
4486 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4487 if (rcStrict != VINF_SUCCESS)
4488 {
4489 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4490 VBOXSTRICTRC_VAL(rcStrict)));
4491 return rcStrict;
4492 }
4493
4494 /* SS must be a data segment and writable. */
4495 if ( !DescSS.Legacy.Gen.u1DescType
4496 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4497 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4498 {
4499 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4500 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4501 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4502 }
4503
4504 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4505 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4506 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4507 {
4508 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4509 uNewCpl));
4510 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4511 }
4512
4513 /* Is it there? */
4514 if (!DescSS.Legacy.Gen.u1Present)
4515 {
4516 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4517 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4518 }
4519
4520 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4521 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4522
4523 /* Set the accessed bit before committing the result into SS. */
4524 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4525 {
4526 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4527 if (rcStrict != VINF_SUCCESS)
4528 return rcStrict;
4529 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4530 }
4531
4532 /* Commit SS. */
4533 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4534 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4535 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4536 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4537 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4538 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4539 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4540
4541 /* CPL has changed, update IEM before loading rest of segments. */
4542 pVCpu->iem.s.uCpl = uNewCpl;
4543
4544 /*
4545 * Load the data segments for the new task.
4546 */
4547 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4548 if (rcStrict != VINF_SUCCESS)
4549 return rcStrict;
4550 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4551 if (rcStrict != VINF_SUCCESS)
4552 return rcStrict;
4553 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4554 if (rcStrict != VINF_SUCCESS)
4555 return rcStrict;
4556 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4557 if (rcStrict != VINF_SUCCESS)
4558 return rcStrict;
4559
4560 /*
4561 * Load the code segment for the new task.
4562 */
4563 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4564 {
4565 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4566 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4567 }
4568
4569 /* Fetch the descriptor. */
4570 IEMSELDESC DescCS;
4571 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4572 if (rcStrict != VINF_SUCCESS)
4573 {
4574 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4575 return rcStrict;
4576 }
4577
4578 /* CS must be a code segment. */
4579 if ( !DescCS.Legacy.Gen.u1DescType
4580 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4581 {
4582 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4583 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4584 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4585 }
4586
4587 /* For conforming CS, DPL must be less than or equal to the RPL. */
4588 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4589 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4590 {
4591        Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4592 DescCS.Legacy.Gen.u2Dpl));
4593 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4594 }
4595
4596 /* For non-conforming CS, DPL must match RPL. */
4597 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4598 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4599 {
4600        Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4601 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4602 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4603 }
4604
4605 /* Is it there? */
4606 if (!DescCS.Legacy.Gen.u1Present)
4607 {
4608 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4609 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4610 }
4611
4612 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4613 u64Base = X86DESC_BASE(&DescCS.Legacy);
4614
4615 /* Set the accessed bit before committing the result into CS. */
4616 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4617 {
4618 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4619 if (rcStrict != VINF_SUCCESS)
4620 return rcStrict;
4621 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4622 }
4623
4624 /* Commit CS. */
4625 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4626 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4627 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4628 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4629 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4630 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4631 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4632 }
4633
4634 /** @todo Debug trap. */
4635 if (fIsNewTSS386 && fNewDebugTrap)
4636 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4637
4638 /*
4639 * Construct the error code masks based on what caused this task switch.
4640 * See Intel Instruction reference for INT.
4641 */
4642 uint16_t uExt;
4643 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4644 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4645 {
4646 uExt = 1;
4647 }
4648 else
4649 uExt = 0;
4650
4651 /*
4652 * Push any error code on to the new stack.
4653 */
4654 if (fFlags & IEM_XCPT_FLAGS_ERR)
4655 {
4656 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4657 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4658 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4659
4660 /* Check that there is sufficient space on the stack. */
4661 /** @todo Factor out segment limit checking for normal/expand down segments
4662 * into a separate function. */
4663 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4664 {
4665 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4666 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4667 {
4668 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4669 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4670 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4671 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4672 }
4673 }
4674 else
4675 {
4676 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4677 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4678 {
4679 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4680 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4681 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4682 }
4683 }
4684
4685
4686 if (fIsNewTSS386)
4687 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4688 else
4689 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4690 if (rcStrict != VINF_SUCCESS)
4691 {
4692 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4693 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4694 return rcStrict;
4695 }
4696 }
4697
4698 /* Check the new EIP against the new CS limit. */
4699 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4700 {
4701        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4702 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4703 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4704 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4705 }
4706
4707 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.ss.Sel));
4708 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4709}
4710
4711
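/*
 * Illustrative sketch (not built): the error-code push at the end of iemTaskSwitch
 * above selects the EXT bit and the push width from the task-switch source and the
 * TSS format.  The helper name and parameters below are example-only stand-ins,
 * assuming nothing beyond standard integer types.
 */
#if 0
/**
 * Example only: returns how many bytes the error code occupies on the new task's
 * stack (dword for a 32-bit TSS, word for a 16-bit TSS) and reports whether the
 * EXT bit would be set (only for exception/interrupt initiated switches that were
 * not raised by a software INTn/INT3/INTO).
 */
static uint8_t iemExampleTaskSwitchErrCodePush(bool fIsNewTSS386, bool fIntXcptSwitch, bool fSoftwareInt, uint16_t *puExt)
{
    *puExt = (fIntXcptSwitch && !fSoftwareInt) ? 1 : 0; /* Mirrors the uExt selection above. */
    return fIsNewTSS386 ? 4 : 2;                        /* 32-bit TSS pushes a dword, 16-bit a word. */
}
#endif
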
4712/**
4713 * Implements exceptions and interrupts for protected mode.
4714 *
4715 * @returns VBox strict status code.
4716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4717 * @param cbInstr The number of bytes to offset rIP by in the return
4718 * address.
4719 * @param u8Vector The interrupt / exception vector number.
4720 * @param fFlags The flags.
4721 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4722 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4723 */
4724IEM_STATIC VBOXSTRICTRC
4725iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4726 uint8_t cbInstr,
4727 uint8_t u8Vector,
4728 uint32_t fFlags,
4729 uint16_t uErr,
4730 uint64_t uCr2)
4731{
4732 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4733
4734 /*
4735 * Read the IDT entry.
4736 */
4737 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4738 {
4739 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4740 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4741 }
4742 X86DESC Idte;
4743 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4744 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4745 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4746 {
4747 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4748 return rcStrict;
4749 }
4750 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4751 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4752 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4753
4754 /*
4755 * Check the descriptor type, DPL and such.
4756 * ASSUMES this is done in the same order as described for call-gate calls.
4757 */
4758 if (Idte.Gate.u1DescType)
4759 {
4760 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4761 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4762 }
4763 bool fTaskGate = false;
4764 uint8_t f32BitGate = true;
4765 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4766 switch (Idte.Gate.u4Type)
4767 {
4768 case X86_SEL_TYPE_SYS_UNDEFINED:
4769 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4770 case X86_SEL_TYPE_SYS_LDT:
4771 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4772 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4773 case X86_SEL_TYPE_SYS_UNDEFINED2:
4774 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4775 case X86_SEL_TYPE_SYS_UNDEFINED3:
4776 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4777 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4778 case X86_SEL_TYPE_SYS_UNDEFINED4:
4779 {
4780 /** @todo check what actually happens when the type is wrong...
4781 * esp. call gates. */
4782 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4783 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4784 }
4785
4786 case X86_SEL_TYPE_SYS_286_INT_GATE:
4787 f32BitGate = false;
4788 RT_FALL_THRU();
4789 case X86_SEL_TYPE_SYS_386_INT_GATE:
4790 fEflToClear |= X86_EFL_IF;
4791 break;
4792
4793 case X86_SEL_TYPE_SYS_TASK_GATE:
4794 fTaskGate = true;
4795#ifndef IEM_IMPLEMENTS_TASKSWITCH
4796 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4797#endif
4798 break;
4799
4800 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4801            f32BitGate = false;
                RT_FALL_THRU();
4802 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4803 break;
4804
4805 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4806 }
4807
4808 /* Check DPL against CPL if applicable. */
4809 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4810 {
4811 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4812 {
4813 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4814 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4815 }
4816 }
4817
4818 /* Is it there? */
4819 if (!Idte.Gate.u1Present)
4820 {
4821 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4822 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4823 }
4824
4825 /* Is it a task-gate? */
4826 if (fTaskGate)
4827 {
4828 /*
4829 * Construct the error code masks based on what caused this task switch.
4830 * See Intel Instruction reference for INT.
4831 */
4832 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4833 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4834 RTSEL SelTSS = Idte.Gate.u16Sel;
4835
4836 /*
4837 * Fetch the TSS descriptor in the GDT.
4838 */
4839 IEMSELDESC DescTSS;
4840 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4841 if (rcStrict != VINF_SUCCESS)
4842 {
4843 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4844 VBOXSTRICTRC_VAL(rcStrict)));
4845 return rcStrict;
4846 }
4847
4848 /* The TSS descriptor must be a system segment and be available (not busy). */
4849 if ( DescTSS.Legacy.Gen.u1DescType
4850 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4851 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4852 {
4853 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4854 u8Vector, SelTSS, DescTSS.Legacy.au64));
4855 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4856 }
4857
4858 /* The TSS must be present. */
4859 if (!DescTSS.Legacy.Gen.u1Present)
4860 {
4861 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4862 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4863 }
4864
4865 /* Do the actual task switch. */
4866 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT, (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4867 }
4868
4869 /* A null CS is bad. */
4870 RTSEL NewCS = Idte.Gate.u16Sel;
4871 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4872 {
4873 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4874 return iemRaiseGeneralProtectionFault0(pVCpu);
4875 }
4876
4877 /* Fetch the descriptor for the new CS. */
4878 IEMSELDESC DescCS;
4879 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4880 if (rcStrict != VINF_SUCCESS)
4881 {
4882 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4883 return rcStrict;
4884 }
4885
4886 /* Must be a code segment. */
4887 if (!DescCS.Legacy.Gen.u1DescType)
4888 {
4889 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4890 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4891 }
4892 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4893 {
4894 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4895 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4896 }
4897
4898 /* Don't allow lowering the privilege level. */
4899 /** @todo Does the lowering of privileges apply to software interrupts
4900 * only? This has bearings on the more-privileged or
4901 * same-privilege stack behavior further down. A testcase would
4902 * be nice. */
4903 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4904 {
4905 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4906 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4907 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4908 }
4909
4910 /* Make sure the selector is present. */
4911 if (!DescCS.Legacy.Gen.u1Present)
4912 {
4913 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4914 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4915 }
4916
4917 /* Check the new EIP against the new CS limit. */
4918 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4919 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4920 ? Idte.Gate.u16OffsetLow
4921 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4922 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4923 if (uNewEip > cbLimitCS)
4924 {
4925 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4926 u8Vector, uNewEip, cbLimitCS, NewCS));
4927 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4928 }
4929 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4930
4931 /* Calc the flag image to push. */
4932 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4933 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4934 fEfl &= ~X86_EFL_RF;
4935 else
4936 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4937
4938 /* From V8086 mode only go to CPL 0. */
4939 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4940 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4941 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4942 {
4943 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4944 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4945 }
4946
4947 /*
4948 * If the privilege level changes, we need to get a new stack from the TSS.
4949 * This in turns means validating the new SS and ESP...
4950 */
4951 if (uNewCpl != pVCpu->iem.s.uCpl)
4952 {
4953 RTSEL NewSS;
4954 uint32_t uNewEsp;
4955 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4956 if (rcStrict != VINF_SUCCESS)
4957 return rcStrict;
4958
4959 IEMSELDESC DescSS;
4960 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4961 if (rcStrict != VINF_SUCCESS)
4962 return rcStrict;
4963 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4964 if (!DescSS.Legacy.Gen.u1DefBig)
4965 {
4966 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4967 uNewEsp = (uint16_t)uNewEsp;
4968 }
4969
4970 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4971
4972 /* Check that there is sufficient space for the stack frame. */
4973 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4974 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4975 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4976 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4977
4978 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4979 {
4980 if ( uNewEsp - 1 > cbLimitSS
4981 || uNewEsp < cbStackFrame)
4982 {
4983 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4984 u8Vector, NewSS, uNewEsp, cbStackFrame));
4985 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4986 }
4987 }
4988 else
4989 {
4990 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4991 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4992 {
4993 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4994 u8Vector, NewSS, uNewEsp, cbStackFrame));
4995 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4996 }
4997 }
4998
4999 /*
5000 * Start making changes.
5001 */
5002
5003 /* Set the new CPL so that stack accesses use it. */
5004 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5005 pVCpu->iem.s.uCpl = uNewCpl;
5006
5007 /* Create the stack frame. */
5008 RTPTRUNION uStackFrame;
5009 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5010 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5011 if (rcStrict != VINF_SUCCESS)
5012 return rcStrict;
5013 void * const pvStackFrame = uStackFrame.pv;
5014 if (f32BitGate)
5015 {
5016 if (fFlags & IEM_XCPT_FLAGS_ERR)
5017 *uStackFrame.pu32++ = uErr;
5018 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5019 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5020 uStackFrame.pu32[2] = fEfl;
5021 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5022 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5023 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5024 if (fEfl & X86_EFL_VM)
5025 {
5026 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5027 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5028 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5029 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5030 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5031 }
5032 }
5033 else
5034 {
5035 if (fFlags & IEM_XCPT_FLAGS_ERR)
5036 *uStackFrame.pu16++ = uErr;
5037 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5038 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5039 uStackFrame.pu16[2] = fEfl;
5040 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5041 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5042 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5043 if (fEfl & X86_EFL_VM)
5044 {
5045 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5046 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5047 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5048 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5049 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5050 }
5051 }
5052 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5053 if (rcStrict != VINF_SUCCESS)
5054 return rcStrict;
5055
5056 /* Mark the selectors 'accessed' (hope this is the correct time). */
5057    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5058 * after pushing the stack frame? (Write protect the gdt + stack to
5059 * find out.) */
5060 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5061 {
5062 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5063 if (rcStrict != VINF_SUCCESS)
5064 return rcStrict;
5065 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5066 }
5067
5068 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5069 {
5070 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5071 if (rcStrict != VINF_SUCCESS)
5072 return rcStrict;
5073 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5074 }
5075
5076 /*
5077     * Start committing the register changes (joins with the DPL=CPL branch).
5078 */
5079 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5080 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5081 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5082 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5083 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5084 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5085 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5086 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5087 * SP is loaded).
5088 * Need to check the other combinations too:
5089 * - 16-bit TSS, 32-bit handler
5090 * - 32-bit TSS, 16-bit handler */
5091 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5092 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5093 else
5094 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5095
5096 if (fEfl & X86_EFL_VM)
5097 {
5098 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5099 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5100 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5101 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5102 }
5103 }
5104 /*
5105 * Same privilege, no stack change and smaller stack frame.
5106 */
5107 else
5108 {
5109 uint64_t uNewRsp;
5110 RTPTRUNION uStackFrame;
5111 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5112 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5113 if (rcStrict != VINF_SUCCESS)
5114 return rcStrict;
5115 void * const pvStackFrame = uStackFrame.pv;
5116
5117 if (f32BitGate)
5118 {
5119 if (fFlags & IEM_XCPT_FLAGS_ERR)
5120 *uStackFrame.pu32++ = uErr;
5121 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5122 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5123 uStackFrame.pu32[2] = fEfl;
5124 }
5125 else
5126 {
5127 if (fFlags & IEM_XCPT_FLAGS_ERR)
5128 *uStackFrame.pu16++ = uErr;
5129 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5130 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5131 uStackFrame.pu16[2] = fEfl;
5132 }
5133 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5134 if (rcStrict != VINF_SUCCESS)
5135 return rcStrict;
5136
5137 /* Mark the CS selector as 'accessed'. */
5138 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5139 {
5140 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5141 if (rcStrict != VINF_SUCCESS)
5142 return rcStrict;
5143 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5144 }
5145
5146 /*
5147 * Start committing the register changes (joins with the other branch).
5148 */
5149 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5150 }
5151
5152 /* ... register committing continues. */
5153 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5154 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5155 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5156 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5157 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5158 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5159
5160 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5161 fEfl &= ~fEflToClear;
5162 IEMMISC_SET_EFL(pVCpu, fEfl);
5163
5164 if (fFlags & IEM_XCPT_FLAGS_CR2)
5165 pVCpu->cpum.GstCtx.cr2 = uCr2;
5166
5167 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5168 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5169
5170 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5171}
5172
5173
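/*
 * Illustrative sketch (not built): the cbStackFrame arithmetic in
 * iemRaiseXcptOrIntInProtMode above, spelled out.  The helper below is an
 * example-only stand-in using plain integer types.
 */
#if 0
/**
 * Example only: a 16-bit gate pushes words and a 32-bit gate dwords (hence the
 * '<< f32BitGate' doubling above); a CPL change adds SS:ESP, V8086 mode
 * additionally adds GS/FS/DS/ES, and an error code adds one more slot.
 */
static uint8_t iemExampleProtModeXcptFrameSize(bool f32BitGate, bool fCplChange, bool fV86, bool fErrCd)
{
    uint8_t cSlots = 3;                    /* EIP, CS, EFLAGS */
    if (fCplChange || fV86)
        cSlots += 2;                       /* ESP, SS */
    if (fV86)
        cSlots += 4;                       /* ES, DS, FS, GS */
    if (fErrCd)
        cSlots += 1;                       /* error code */
    return cSlots * (f32BitGate ? 4 : 2);  /* dword vs. word sized slots */
}
#endif
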
5174/**
5175 * Implements exceptions and interrupts for long mode.
5176 *
5177 * @returns VBox strict status code.
5178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5179 * @param cbInstr The number of bytes to offset rIP by in the return
5180 * address.
5181 * @param u8Vector The interrupt / exception vector number.
5182 * @param fFlags The flags.
5183 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5184 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5185 */
5186IEM_STATIC VBOXSTRICTRC
5187iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5188 uint8_t cbInstr,
5189 uint8_t u8Vector,
5190 uint32_t fFlags,
5191 uint16_t uErr,
5192 uint64_t uCr2)
5193{
5194 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5195
5196 /*
5197 * Read the IDT entry.
5198 */
5199 uint16_t offIdt = (uint16_t)u8Vector << 4;
5200 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5201 {
5202 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5203 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5204 }
5205 X86DESC64 Idte;
5206 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5207 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5208 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5209 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5210 {
5211 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5212 return rcStrict;
5213 }
5214 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5215 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5216 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5217
5218 /*
5219 * Check the descriptor type, DPL and such.
5220 * ASSUMES this is done in the same order as described for call-gate calls.
5221 */
5222 if (Idte.Gate.u1DescType)
5223 {
5224 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5225 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5226 }
5227 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5228 switch (Idte.Gate.u4Type)
5229 {
5230 case AMD64_SEL_TYPE_SYS_INT_GATE:
5231 fEflToClear |= X86_EFL_IF;
5232 break;
5233 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5234 break;
5235
5236 default:
5237 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5238 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5239 }
5240
5241 /* Check DPL against CPL if applicable. */
5242 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5243 {
5244 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5245 {
5246 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5247 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5248 }
5249 }
5250
5251 /* Is it there? */
5252 if (!Idte.Gate.u1Present)
5253 {
5254 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5255 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5256 }
5257
5258 /* A null CS is bad. */
5259 RTSEL NewCS = Idte.Gate.u16Sel;
5260 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5261 {
5262 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5263 return iemRaiseGeneralProtectionFault0(pVCpu);
5264 }
5265
5266 /* Fetch the descriptor for the new CS. */
5267 IEMSELDESC DescCS;
5268 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5269 if (rcStrict != VINF_SUCCESS)
5270 {
5271 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5272 return rcStrict;
5273 }
5274
5275 /* Must be a 64-bit code segment. */
5276 if (!DescCS.Long.Gen.u1DescType)
5277 {
5278 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5279 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5280 }
5281 if ( !DescCS.Long.Gen.u1Long
5282 || DescCS.Long.Gen.u1DefBig
5283 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5284 {
5285 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5286 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5287 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5288 }
5289
5290 /* Don't allow lowering the privilege level. For non-conforming CS
5291 selectors, the CS.DPL sets the privilege level the trap/interrupt
5292 handler runs at. For conforming CS selectors, the CPL remains
5293 unchanged, but the CS.DPL must be <= CPL. */
5294 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5295 * when CPU in Ring-0. Result \#GP? */
5296 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5297 {
5298 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5299 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5300 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5301 }
5302
5303
5304 /* Make sure the selector is present. */
5305 if (!DescCS.Legacy.Gen.u1Present)
5306 {
5307 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5308 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5309 }
5310
5311 /* Check that the new RIP is canonical. */
5312 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5313 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5314 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5315 if (!IEM_IS_CANONICAL(uNewRip))
5316 {
5317 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5318 return iemRaiseGeneralProtectionFault0(pVCpu);
5319 }
5320
5321 /*
5322 * If the privilege level changes or if the IST isn't zero, we need to get
5323 * a new stack from the TSS.
5324 */
5325 uint64_t uNewRsp;
5326 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5327 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5328 if ( uNewCpl != pVCpu->iem.s.uCpl
5329 || Idte.Gate.u3IST != 0)
5330 {
5331 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5332 if (rcStrict != VINF_SUCCESS)
5333 return rcStrict;
5334 }
5335 else
5336 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5337 uNewRsp &= ~(uint64_t)0xf;
5338
5339 /*
5340 * Calc the flag image to push.
5341 */
5342 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5343 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5344 fEfl &= ~X86_EFL_RF;
5345 else
5346 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5347
5348 /*
5349 * Start making changes.
5350 */
5351 /* Set the new CPL so that stack accesses use it. */
5352 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5353 pVCpu->iem.s.uCpl = uNewCpl;
5354
5355 /* Create the stack frame. */
5356 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5357 RTPTRUNION uStackFrame;
5358 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5359 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5360 if (rcStrict != VINF_SUCCESS)
5361 return rcStrict;
5362 void * const pvStackFrame = uStackFrame.pv;
5363
5364 if (fFlags & IEM_XCPT_FLAGS_ERR)
5365 *uStackFrame.pu64++ = uErr;
5366 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5367 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5368 uStackFrame.pu64[2] = fEfl;
5369 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5370 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5371 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5372 if (rcStrict != VINF_SUCCESS)
5373 return rcStrict;
5374
5375    /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5376    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5377 * after pushing the stack frame? (Write protect the gdt + stack to
5378 * find out.) */
5379 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5380 {
5381 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5382 if (rcStrict != VINF_SUCCESS)
5383 return rcStrict;
5384 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5385 }
5386
5387 /*
5388     * Start committing the register changes.
5389 */
5390 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5391 * hidden registers when interrupting 32-bit or 16-bit code! */
5392 if (uNewCpl != uOldCpl)
5393 {
5394 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5395 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5396 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5397 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5398 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5399 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5400 }
5401 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5402 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5403 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5404 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5405 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5406 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5407 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5408 pVCpu->cpum.GstCtx.rip = uNewRip;
5409
5410 fEfl &= ~fEflToClear;
5411 IEMMISC_SET_EFL(pVCpu, fEfl);
5412
5413 if (fFlags & IEM_XCPT_FLAGS_CR2)
5414 pVCpu->cpum.GstCtx.cr2 = uCr2;
5415
5416 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5417 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5418
5419 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5420}
5421
5422
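/*
 * Illustrative sketch (not built): the long-mode IDT lookup and target address
 * assembly used in iemRaiseXcptOrIntInLongMode above.  Example-only helper,
 * assuming nothing beyond standard integer types.
 */
#if 0
/**
 * Example only: long-mode gate descriptors are 16 bytes, so the entry for a
 * vector lives at IDT.base + vector * 16, and the 64-bit handler address is
 * stitched together from the three offset fields.  (The new RSP is additionally
 * aligned down to 16 bytes before the frame is pushed, see above.)
 */
static uint64_t iemExampleLongModeGateTarget(uint8_t bVector, uint64_t uIdtBase, uint16_t uOffsetLow,
                                             uint16_t uOffsetHigh, uint32_t uOffsetTop, uint64_t *puDescAddr)
{
    *puDescAddr = uIdtBase + (uint32_t)bVector * 16;   /* 16-byte IDT entries in long mode. */
    return (uint64_t)uOffsetLow
         | ((uint64_t)uOffsetHigh << 16)
         | ((uint64_t)uOffsetTop  << 32);
}
#endif
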
5423/**
5424 * Implements exceptions and interrupts.
5425 *
5426 * All exceptions and interrupts go through this function!
5427 *
5428 * @returns VBox strict status code.
5429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5430 * @param cbInstr The number of bytes to offset rIP by in the return
5431 * address.
5432 * @param u8Vector The interrupt / exception vector number.
5433 * @param fFlags The flags.
5434 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5435 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5436 */
5437DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5438iemRaiseXcptOrInt(PVMCPU pVCpu,
5439 uint8_t cbInstr,
5440 uint8_t u8Vector,
5441 uint32_t fFlags,
5442 uint16_t uErr,
5443 uint64_t uCr2)
5444{
5445 /*
5446 * Get all the state that we might need here.
5447 */
5448 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5449 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5450
5451#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5452 /*
5453 * Flush prefetch buffer
5454 */
5455 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5456#endif
5457
5458 /*
5459 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5460 */
5461 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5462 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5463 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5464 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5465 {
5466 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5467 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5468 u8Vector = X86_XCPT_GP;
5469 uErr = 0;
5470 }
5471#ifdef DBGFTRACE_ENABLED
5472 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5473 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5474 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5475#endif
5476
5477#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5478 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5479 {
5480 /*
5481 * If the event is being injected as part of VMRUN, it isn't subject to event
5482 * intercepts in the nested-guest. However, secondary exceptions that occur
5483 * during injection of any event -are- subject to exception intercepts.
5484 * See AMD spec. 15.20 "Event Injection".
5485 */
5486 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5487 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = 1;
5488 else
5489 {
5490 /*
5491 * Check and handle if the event being raised is intercepted.
5492 */
5493 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5494 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5495 return rcStrict0;
5496 }
5497 }
5498#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5499
5500 /*
5501 * Do recursion accounting.
5502 */
5503 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5504 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5505 if (pVCpu->iem.s.cXcptRecursions == 0)
5506 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5507 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5508 else
5509 {
5510 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5511 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5512 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5513
5514 if (pVCpu->iem.s.cXcptRecursions >= 4)
5515 {
5516#ifdef DEBUG_bird
5517 AssertFailed();
5518#endif
5519 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5520 }
5521
5522 /*
5523 * Evaluate the sequence of recurring events.
5524 */
5525 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5526 NULL /* pXcptRaiseInfo */);
5527 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5528 { /* likely */ }
5529 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5530 {
5531 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5532 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5533 u8Vector = X86_XCPT_DF;
5534 uErr = 0;
5535 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5536 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5537 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5538 }
5539 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5540 {
5541 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5542 return iemInitiateCpuShutdown(pVCpu);
5543 }
5544 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5545 {
5546 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5547 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5548 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5549 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5550 return VERR_EM_GUEST_CPU_HANG;
5551 }
5552 else
5553 {
5554 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5555 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5556 return VERR_IEM_IPE_9;
5557 }
5558
5559 /*
5560     * The 'EXT' bit is set when an exception occurs during delivery of an external
5561     * event (such as an interrupt or an earlier exception)[1]. The privileged software
5562     * exception (INT1) also sets the EXT bit[2]. For exceptions generated by the
5563     * software interrupt instructions INT n, INTO and INT3, the 'EXT' bit is not set.
5564 *
5565 * [1] - Intel spec. 6.13 "Error Code"
5566 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5567 * [3] - Intel Instruction reference for INT n.
5568 */
5569 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5570 && (fFlags & IEM_XCPT_FLAGS_ERR)
5571 && u8Vector != X86_XCPT_PF
5572 && u8Vector != X86_XCPT_DF)
5573 {
5574 uErr |= X86_TRAP_ERR_EXTERNAL;
5575 }
5576 }
5577
5578 pVCpu->iem.s.cXcptRecursions++;
5579 pVCpu->iem.s.uCurXcpt = u8Vector;
5580 pVCpu->iem.s.fCurXcpt = fFlags;
5581 pVCpu->iem.s.uCurXcptErr = uErr;
5582 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5583
5584 /*
5585 * Extensive logging.
5586 */
5587#if defined(LOG_ENABLED) && defined(IN_RING3)
5588 if (LogIs3Enabled())
5589 {
5590 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5591 PVM pVM = pVCpu->CTX_SUFF(pVM);
5592 char szRegs[4096];
5593 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5594 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5595 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5596 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5597 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5598 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5599 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5600 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5601 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5602 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5603 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5604 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5605 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5606 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5607 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5608 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5609 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5610 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5611 " efer=%016VR{efer}\n"
5612 " pat=%016VR{pat}\n"
5613 " sf_mask=%016VR{sf_mask}\n"
5614 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5615 " lstar=%016VR{lstar}\n"
5616 " star=%016VR{star} cstar=%016VR{cstar}\n"
5617 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5618 );
5619
5620 char szInstr[256];
5621 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5622 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5623 szInstr, sizeof(szInstr), NULL);
5624 Log3(("%s%s\n", szRegs, szInstr));
5625 }
5626#endif /* LOG_ENABLED */
5627
5628 /*
5629 * Call the mode specific worker function.
5630 */
5631 VBOXSTRICTRC rcStrict;
5632 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5633 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5634 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5635 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5636 else
5637 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5638
5639 /* Flush the prefetch buffer. */
5640#ifdef IEM_WITH_CODE_TLB
5641 pVCpu->iem.s.pbInstrBuf = NULL;
5642#else
5643 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5644#endif
5645
5646 /*
5647 * Unwind.
5648 */
5649 pVCpu->iem.s.cXcptRecursions--;
5650 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5651 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5652 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5653 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5654 pVCpu->iem.s.cXcptRecursions + 1));
5655 return rcStrict;
5656}
5657
5658#ifdef IEM_WITH_SETJMP
5659/**
5660 * See iemRaiseXcptOrInt. Will not return.
5661 */
5662IEM_STATIC DECL_NO_RETURN(void)
5663iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5664 uint8_t cbInstr,
5665 uint8_t u8Vector,
5666 uint32_t fFlags,
5667 uint16_t uErr,
5668 uint64_t uCr2)
5669{
5670 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5671 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5672}
5673#endif
5674
5675
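/*
 * Illustrative sketch (not built): the X86_TRAP_ERR_IDT | (vector << X86_TRAP_ERR_SEL_SHIFT)
 * expressions used by the workers above build the architectural selector error code.
 * The helper and the hard-coded bit values below are example-only, assuming the usual
 * x86 layout (index in bits 3..15, IDT flag in bit 1, EXT in bit 0).
 */
#if 0
/**
 * Example only: builds an IDT-referencing error code for the given vector,
 * optionally flagging external (hardware) delivery.
 */
static uint16_t iemExampleIdtErrCode(uint8_t bVector, bool fExternal)
{
    return (uint16_t)((uint16_t)bVector << 3)      /* index field */
         | UINT16_C(0x0002)                        /* IDT: the index refers to the IDT */
         | (fExternal ? UINT16_C(0x0001) : 0);     /* EXT: set for externally delivered events */
}
#endif
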
5676/** \#DE - 00. */
5677DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5678{
5679 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5680}
5681
5682
5683/** \#DB - 01.
5684 * @note This automatically clears DR7.GD. */
5685DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5686{
5687 /** @todo set/clear RF. */
5688 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5689 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5690}
5691
5692
5693/** \#BR - 05. */
5694DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5695{
5696 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5697}
5698
5699
5700/** \#UD - 06. */
5701DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5702{
5703 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5704}
5705
5706
5707/** \#NM - 07. */
5708DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5709{
5710 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5711}
5712
5713
5714/** \#TS(err) - 0a. */
5715DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5716{
5717 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5718}
5719
5720
5721/** \#TS(tr) - 0a. */
5722DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5723{
5724 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5725 pVCpu->cpum.GstCtx.tr.Sel, 0);
5726}
5727
5728
5729/** \#TS(0) - 0a. */
5730DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5731{
5732 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5733 0, 0);
5734}
5735
5736
5737/** \#TS(err) - 0a. */
5738DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5739{
5740 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5741 uSel & X86_SEL_MASK_OFF_RPL, 0);
5742}
5743
5744
5745/** \#NP(err) - 0b. */
5746DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5747{
5748 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5749}
5750
5751
5752/** \#NP(sel) - 0b. */
5753DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5754{
5755 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5756 uSel & ~X86_SEL_RPL, 0);
5757}
5758
5759
5760/** \#SS(seg) - 0c. */
5761DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5762{
5763 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5764 uSel & ~X86_SEL_RPL, 0);
5765}
5766
5767
5768/** \#SS(err) - 0c. */
5769DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5770{
5771 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5772}
5773
5774
5775/** \#GP(n) - 0d. */
5776DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5777{
5778 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5779}
5780
5781
5782/** \#GP(0) - 0d. */
5783DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5784{
5785 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5786}
5787
5788#ifdef IEM_WITH_SETJMP
5789/** \#GP(0) - 0d. */
5790DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5791{
5792 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5793}
5794#endif
5795
5796
5797/** \#GP(sel) - 0d. */
5798DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5799{
5800 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5801 Sel & ~X86_SEL_RPL, 0);
5802}
5803
5804
5805/** \#GP(0) - 0d. */
5806DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5807{
5808 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5809}
5810
5811
5812/** \#GP(sel) - 0d. */
5813DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5814{
5815 NOREF(iSegReg); NOREF(fAccess);
5816 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5817 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5818}
5819
5820#ifdef IEM_WITH_SETJMP
5821/** \#GP(sel) - 0d, longjmp. */
5822DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5823{
5824 NOREF(iSegReg); NOREF(fAccess);
5825 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5826 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5827}
5828#endif
5829
5830/** \#GP(sel) - 0d. */
5831DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5832{
5833 NOREF(Sel);
5834 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5835}
5836
5837#ifdef IEM_WITH_SETJMP
5838/** \#GP(sel) - 0d, longjmp. */
5839DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5840{
5841 NOREF(Sel);
5842 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5843}
5844#endif
5845
5846
5847/** \#GP(sel) - 0d. */
5848DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5849{
5850 NOREF(iSegReg); NOREF(fAccess);
5851 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5852}
5853
5854#ifdef IEM_WITH_SETJMP
5855/** \#GP(sel) - 0d, longjmp. */
5856DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5857 uint32_t fAccess)
5858{
5859 NOREF(iSegReg); NOREF(fAccess);
5860 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5861}
5862#endif
5863
5864
5865/** \#PF(n) - 0e. */
5866DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5867{
5868 uint16_t uErr;
5869 switch (rc)
5870 {
5871 case VERR_PAGE_NOT_PRESENT:
5872 case VERR_PAGE_TABLE_NOT_PRESENT:
5873 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5874 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5875 uErr = 0;
5876 break;
5877
5878 default:
5879 AssertMsgFailed(("%Rrc\n", rc));
5880 RT_FALL_THRU();
5881 case VERR_ACCESS_DENIED:
5882 uErr = X86_TRAP_PF_P;
5883 break;
5884
5885 /** @todo reserved */
5886 }
5887
5888 if (pVCpu->iem.s.uCpl == 3)
5889 uErr |= X86_TRAP_PF_US;
5890
5891 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5892 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5893 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5894 uErr |= X86_TRAP_PF_ID;
5895
5896#if 0 /* This is so much non-sense, really. Why was it done like that? */
5897    /* Note! RW access callers reporting a WRITE protection fault will clear
5898 the READ flag before calling. So, read-modify-write accesses (RW)
5899 can safely be reported as READ faults. */
5900 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5901 uErr |= X86_TRAP_PF_RW;
5902#else
5903 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5904 {
5905 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5906 uErr |= X86_TRAP_PF_RW;
5907 }
5908#endif
5909
5910 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5911 uErr, GCPtrWhere);
5912}
5913
5914#ifdef IEM_WITH_SETJMP
5915/** \#PF(n) - 0e, longjmp. */
5916IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5917{
5918 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5919}
5920#endif
5921
5922
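/*
 * Illustrative sketch (not built): the #PF error-code construction in
 * iemRaisePageFault above, using the architectural bit layout (P=bit 0,
 * W/R=bit 1, U/S=bit 2, I/D=bit 4).  Example-only helper and values.
 */
#if 0
/**
 * Example only: mirrors how the bits are accumulated above - P for protection
 * violations (as opposed to not-present), W/R for writes, U/S for CPL 3
 * accesses, and I/D for instruction fetches when PAE + EFER.NXE are active.
 */
static uint16_t iemExamplePageFaultErrCode(bool fProtViolation, bool fWrite, bool fUser, bool fInstrFetchNx)
{
    uint16_t uErr = 0;
    if (fProtViolation)
        uErr |= UINT16_C(0x0001);   /* P   */
    if (fWrite)
        uErr |= UINT16_C(0x0002);   /* W/R */
    if (fUser)
        uErr |= UINT16_C(0x0004);   /* U/S */
    if (fInstrFetchNx)
        uErr |= UINT16_C(0x0010);   /* I/D */
    return uErr;
}
#endif
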
5923/** \#MF(0) - 10. */
5924DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5925{
5926 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5927}
5928
5929
5930/** \#AC(0) - 11. */
5931DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5932{
5933 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5934}
5935
5936
5937/**
5938 * Macro for calling iemCImplRaiseDivideError().
5939 *
5940 * This enables us to add/remove arguments and force different levels of
5941 * inlining as we wish.
5942 *
5943 * @return Strict VBox status code.
5944 */
5945#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5946IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5947{
5948 NOREF(cbInstr);
5949 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5950}
5951
5952
5953/**
5954 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5955 *
5956 * This enables us to add/remove arguments and force different levels of
5957 * inlining as we wish.
5958 *
5959 * @return Strict VBox status code.
5960 */
5961#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5962IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5963{
5964 NOREF(cbInstr);
5965 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5966}
5967
5968
5969/**
5970 * Macro for calling iemCImplRaiseInvalidOpcode().
5971 *
5972 * This enables us to add/remove arguments and force different levels of
5973 * inlining as we wish.
5974 *
5975 * @return Strict VBox status code.
5976 */
5977#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5978IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5979{
5980 NOREF(cbInstr);
5981 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5982}
5983
5984
5985/** @} */
5986
5987
5988/*
5989 *
5990 * Helper routines.
5991 * Helper routines.
5992 * Helper routines.
5993 *
5994 */
5995
5996/**
5997 * Recalculates the effective operand size.
5998 *
5999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6000 */
6001IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6002{
6003 switch (pVCpu->iem.s.enmCpuMode)
6004 {
6005 case IEMMODE_16BIT:
6006 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6007 break;
6008 case IEMMODE_32BIT:
6009 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6010 break;
6011 case IEMMODE_64BIT:
6012 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6013 {
6014 case 0:
6015 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6016 break;
6017 case IEM_OP_PRF_SIZE_OP:
6018 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6019 break;
6020 case IEM_OP_PRF_SIZE_REX_W:
6021 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6022 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6023 break;
6024 }
6025 break;
6026 default:
6027 AssertFailed();
6028 }
6029}
6030
6031
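/*
 * Illustrative sketch (not built): the operand-size resolution implemented by
 * iemRecalEffOpSize above, written as a small decision table.  Example-only
 * helper; cDefBits64 stands in for the 64-bit default operand size (32, or 64
 * once promoted by iemRecalEffOpSize64Default below).
 */
#if 0
/**
 * Example only: REX.W wins in 64-bit mode, otherwise the 0x66 prefix toggles
 * between the mode's default operand size and its alternative.
 */
static uint8_t iemExampleEffOpSizeBits(uint8_t cModeBits, bool fPrefix66, bool fRexW, uint8_t cDefBits64)
{
    if (cModeBits == 16)
        return fPrefix66 ? 32 : 16;
    if (cModeBits == 32)
        return fPrefix66 ? 16 : 32;
    if (fRexW)
        return 64;                        /* REX.W overrides a 0x66 prefix. */
    return fPrefix66 ? 16 : cDefBits64;   /* 0x66 forces 16-bit, else the default applies. */
}
#endif
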
6032/**
6033 * Sets the default operand size to 64-bit and recalculates the effective
6034 * operand size.
6035 *
6036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6037 */
6038IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6039{
6040 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6041 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6042 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6043 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6044 else
6045 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6046}
6047
6048
6049/*
6050 *
6051 * Common opcode decoders.
6052 * Common opcode decoders.
6053 * Common opcode decoders.
6054 *
6055 */
6056//#include <iprt/mem.h>
6057
6058/**
6059 * Used to add extra details about a stub case.
6060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6061 */
6062IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6063{
6064#if defined(LOG_ENABLED) && defined(IN_RING3)
6065 PVM pVM = pVCpu->CTX_SUFF(pVM);
6066 char szRegs[4096];
6067 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6068 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6069 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6070 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6071 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6072 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6073 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6074 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6075 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6076 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6077 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6078 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6079 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6080 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6081 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6082 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6083 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6084 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6085 " efer=%016VR{efer}\n"
6086 " pat=%016VR{pat}\n"
6087 " sf_mask=%016VR{sf_mask}\n"
6088 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6089 " lstar=%016VR{lstar}\n"
6090 " star=%016VR{star} cstar=%016VR{cstar}\n"
6091 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6092 );
6093
6094 char szInstr[256];
6095 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6096 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6097 szInstr, sizeof(szInstr), NULL);
6098
6099 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6100#else
6101 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
6102#endif
6103}
6104
6105/**
6106 * Complains about a stub.
6107 *
6108 * Two versions of this macro are provided: one for daily use and one for use
6109 * when working on IEM.
6110 */
6111#if 0
6112# define IEMOP_BITCH_ABOUT_STUB() \
6113 do { \
6114 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6115 iemOpStubMsg2(pVCpu); \
6116 RTAssertPanic(); \
6117 } while (0)
6118#else
6119# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6120#endif
6121
6122/** Stubs an opcode. */
6123#define FNIEMOP_STUB(a_Name) \
6124 FNIEMOP_DEF(a_Name) \
6125 { \
6126 RT_NOREF_PV(pVCpu); \
6127 IEMOP_BITCH_ABOUT_STUB(); \
6128 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6129 } \
6130 typedef int ignore_semicolon
6131
6132/** Stubs an opcode. */
6133#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6134 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6135 { \
6136 RT_NOREF_PV(pVCpu); \
6137 RT_NOREF_PV(a_Name0); \
6138 IEMOP_BITCH_ABOUT_STUB(); \
6139 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6140 } \
6141 typedef int ignore_semicolon
6142
6143/** Stubs an opcode which currently should raise \#UD. */
6144#define FNIEMOP_UD_STUB(a_Name) \
6145 FNIEMOP_DEF(a_Name) \
6146 { \
6147 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6148 return IEMOP_RAISE_INVALID_OPCODE(); \
6149 } \
6150 typedef int ignore_semicolon
6151
6152/** Stubs an opcode which currently should raise \#UD. */
6153#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6154 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6155 { \
6156 RT_NOREF_PV(pVCpu); \
6157 RT_NOREF_PV(a_Name0); \
6158 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6159 return IEMOP_RAISE_INVALID_OPCODE(); \
6160 } \
6161 typedef int ignore_semicolon
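/*
 * Illustrative sketch (not part of the original source): typical use of the
 * stub macros above while instructions are still unimplemented.  The opcode
 * handler names are made up for illustration.
 */
#if 0
/** A not-yet-implemented opcode: logs and returns VERR_IEM_INSTR_NOT_IMPLEMENTED. */
FNIEMOP_STUB(iemOp_ExampleNotImplemented);

/** An opcode that should simply raise \#UD until it gets a real implementation. */
FNIEMOP_UD_STUB(iemOp_ExampleRaisesUd);
#endif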
6162
6163
6164
6165/** @name Register Access.
6166 * @{
6167 */
6168
6169/**
6170 * Gets a reference (pointer) to the specified hidden segment register.
6171 *
6172 * @returns Hidden register reference.
6173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6174 * @param iSegReg The segment register.
6175 */
6176IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6177{
6178 Assert(iSegReg < X86_SREG_COUNT);
6179 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6180 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6181
6182#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6183 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6184 { /* likely */ }
6185 else
6186 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6187#else
6188 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6189#endif
6190 return pSReg;
6191}
6192
6193
6194/**
6195 * Ensures that the given hidden segment register is up to date.
6196 *
6197 * @returns Hidden register reference.
6198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6199 * @param pSReg The segment register.
6200 */
6201IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6202{
6203#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6204 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6205 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6206#else
6207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6208 NOREF(pVCpu);
6209#endif
6210 return pSReg;
6211}
6212
6213
6214/**
6215 * Gets a reference (pointer) to the specified segment register (the selector
6216 * value).
6217 *
6218 * @returns Pointer to the selector variable.
6219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6220 * @param iSegReg The segment register.
6221 */
6222DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6223{
6224 Assert(iSegReg < X86_SREG_COUNT);
6225 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6226 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6227}
6228
6229
6230/**
6231 * Fetches the selector value of a segment register.
6232 *
6233 * @returns The selector value.
6234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6235 * @param iSegReg The segment register.
6236 */
6237DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6238{
6239 Assert(iSegReg < X86_SREG_COUNT);
6240 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6241 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6242}
6243
6244
6245/**
6246 * Fetches the base address value of a segment register.
6247 *
6248 * @returns The segment base address value.
6249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6250 * @param iSegReg The segment register.
6251 */
6252DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6253{
6254 Assert(iSegReg < X86_SREG_COUNT);
6255 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6256 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6257}
6258
6259
6260/**
6261 * Gets a reference (pointer) to the specified general purpose register.
6262 *
6263 * @returns Register reference.
6264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6265 * @param iReg The general purpose register.
6266 */
6267DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6268{
6269 Assert(iReg < 16);
6270 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6271}
6272
6273
6274/**
6275 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6276 *
6277 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6278 *
6279 * @returns Register reference.
6280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6281 * @param iReg The register.
6282 */
6283DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6284{
6285 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6286 {
6287 Assert(iReg < 16);
6288 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6289 }
6290 /* high 8-bit register. */
6291 Assert(iReg < 8);
6292 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6293}
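/*
 * Illustrative sketch (not part of the original source): the legacy high-byte
 * quirk handled above.  Without a REX prefix, encodings 4..7 select AH/CH/DH/BH
 * (the high byte of rAX..rBX) rather than SPL/BPL/SIL/DIL.  The helper name is
 * hypothetical.
 */
#if 0
static void iemExampleMovAhAl(PVMCPU pVCpu)
{
    uint8_t const *pbAl = iemGRegRefU8(pVCpu, 0);   /* AL: low byte of rAX */
    uint8_t       *pbAh = iemGRegRefU8(pVCpu, 4);   /* AH: high byte of rAX (no REX prefix) */
    *pbAh = *pbAl;                                  /* roughly what "mov ah, al" does */
}
#endif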
6294
6295
6296/**
6297 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6298 *
6299 * @returns Register reference.
6300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6301 * @param iReg The register.
6302 */
6303DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6304{
6305 Assert(iReg < 16);
6306 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6307}
6308
6309
6310/**
6311 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6312 *
6313 * @returns Register reference.
6314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6315 * @param iReg The register.
6316 */
6317DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6318{
6319 Assert(iReg < 16);
6320 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6321}
6322
6323
6324/**
6325 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6326 *
6327 * @returns Register reference.
6328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6329 * @param iReg The register.
6330 */
6331DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6332{
6333 Assert(iReg < 16);
6334 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6335}
6336
6337
6338/**
6339 * Gets a reference (pointer) to the specified segment register's base address.
6340 *
6341 * @returns Segment register base address reference.
6342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6343 * @param iSegReg The segment selector.
6344 */
6345DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6346{
6347 Assert(iSegReg < X86_SREG_COUNT);
6348 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6349 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6350}
6351
6352
6353/**
6354 * Fetches the value of an 8-bit general purpose register.
6355 *
6356 * @returns The register value.
6357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6358 * @param iReg The register.
6359 */
6360DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6361{
6362 return *iemGRegRefU8(pVCpu, iReg);
6363}
6364
6365
6366/**
6367 * Fetches the value of a 16-bit general purpose register.
6368 *
6369 * @returns The register value.
6370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6371 * @param iReg The register.
6372 */
6373DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6374{
6375 Assert(iReg < 16);
6376 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6377}
6378
6379
6380/**
6381 * Fetches the value of a 32-bit general purpose register.
6382 *
6383 * @returns The register value.
6384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6385 * @param iReg The register.
6386 */
6387DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6388{
6389 Assert(iReg < 16);
6390 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6391}
6392
6393
6394/**
6395 * Fetches the value of a 64-bit general purpose register.
6396 *
6397 * @returns The register value.
6398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6399 * @param iReg The register.
6400 */
6401DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6402{
6403 Assert(iReg < 16);
6404 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6405}
6406
6407
6408/**
6409 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6410 *
6411 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6412 * segment limit.
6413 *
 * @returns Strict VBox status code.
6414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6415 * @param offNextInstr The offset of the next instruction.
6416 */
6417IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6418{
6419 switch (pVCpu->iem.s.enmEffOpSize)
6420 {
6421 case IEMMODE_16BIT:
6422 {
6423 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6424 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6425 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6426 return iemRaiseGeneralProtectionFault0(pVCpu);
6427 pVCpu->cpum.GstCtx.rip = uNewIp;
6428 break;
6429 }
6430
6431 case IEMMODE_32BIT:
6432 {
6433 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6434 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6435
6436 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6437 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6438 return iemRaiseGeneralProtectionFault0(pVCpu);
6439 pVCpu->cpum.GstCtx.rip = uNewEip;
6440 break;
6441 }
6442
6443 case IEMMODE_64BIT:
6444 {
6445 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6446
6447 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6448 if (!IEM_IS_CANONICAL(uNewRip))
6449 return iemRaiseGeneralProtectionFault0(pVCpu);
6450 pVCpu->cpum.GstCtx.rip = uNewRip;
6451 break;
6452 }
6453
6454 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6455 }
6456
6457 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6458
6459#ifndef IEM_WITH_CODE_TLB
6460 /* Flush the prefetch buffer. */
6461 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6462#endif
6463
6464 return VINF_SUCCESS;
6465}
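/*
 * Illustrative sketch (not part of the original source): the basic pattern for
 * a short relative jump.  A real handler fetches the signed 8-bit displacement
 * while decoding; the value below is just a stand-in.  The helper name is
 * hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleJmpRel8(PVMCPU pVCpu)
{
    int8_t const i8Disp = 0x10;     /* stand-in displacement from the instruction stream */
    /* Adds the displacement plus the instruction length to RIP/EIP/IP and raises
       #GP(0) if the target is outside CS.limit or non-canonical. */
    return iemRegRipRelativeJumpS8(pVCpu, i8Disp);
}
#endif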
6466
6467
6468/**
6469 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6470 *
6471 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6472 * segment limit.
6473 *
6474 * @returns Strict VBox status code.
6475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6476 * @param offNextInstr The offset of the next instruction.
6477 */
6478IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6479{
6480 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6481
6482 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6483 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6484 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6485 return iemRaiseGeneralProtectionFault0(pVCpu);
6486 /** @todo Test 16-bit jump in 64-bit mode. Possible? */
6487 pVCpu->cpum.GstCtx.rip = uNewIp;
6488 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6489
6490#ifndef IEM_WITH_CODE_TLB
6491 /* Flush the prefetch buffer. */
6492 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6493#endif
6494
6495 return VINF_SUCCESS;
6496}
6497
6498
6499/**
6500 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6501 *
6502 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6503 * segment limit.
6504 *
6505 * @returns Strict VBox status code.
6506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6507 * @param offNextInstr The offset of the next instruction.
6508 */
6509IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6510{
6511 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6512
6513 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6514 {
6515 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6516
6517 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6518 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6519 return iemRaiseGeneralProtectionFault0(pVCpu);
6520 pVCpu->cpum.GstCtx.rip = uNewEip;
6521 }
6522 else
6523 {
6524 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6525
6526 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6527 if (!IEM_IS_CANONICAL(uNewRip))
6528 return iemRaiseGeneralProtectionFault0(pVCpu);
6529 pVCpu->cpum.GstCtx.rip = uNewRip;
6530 }
6531 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6532
6533#ifndef IEM_WITH_CODE_TLB
6534 /* Flush the prefetch buffer. */
6535 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6536#endif
6537
6538 return VINF_SUCCESS;
6539}
6540
6541
6542/**
6543 * Performs a near jump to the specified address.
6544 *
6545 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6546 * segment limit.
6547 *
6548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6549 * @param uNewRip The new RIP value.
6550 */
6551IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6552{
6553 switch (pVCpu->iem.s.enmEffOpSize)
6554 {
6555 case IEMMODE_16BIT:
6556 {
6557 Assert(uNewRip <= UINT16_MAX);
6558 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6559 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6560 return iemRaiseGeneralProtectionFault0(pVCpu);
6561 /** @todo Test 16-bit jump in 64-bit mode. */
6562 pVCpu->cpum.GstCtx.rip = uNewRip;
6563 break;
6564 }
6565
6566 case IEMMODE_32BIT:
6567 {
6568 Assert(uNewRip <= UINT32_MAX);
6569 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6570 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6571
6572 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6573 return iemRaiseGeneralProtectionFault0(pVCpu);
6574 pVCpu->cpum.GstCtx.rip = uNewRip;
6575 break;
6576 }
6577
6578 case IEMMODE_64BIT:
6579 {
6580 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6581
6582 if (!IEM_IS_CANONICAL(uNewRip))
6583 return iemRaiseGeneralProtectionFault0(pVCpu);
6584 pVCpu->cpum.GstCtx.rip = uNewRip;
6585 break;
6586 }
6587
6588 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6589 }
6590
6591 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6592
6593#ifndef IEM_WITH_CODE_TLB
6594 /* Flush the prefetch buffer. */
6595 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6596#endif
6597
6598 return VINF_SUCCESS;
6599}
6600
6601
6602/**
6603 * Gets the address of the top of the stack.
6604 *
6605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6606 */
6607DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6608{
6609 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6610 return pVCpu->cpum.GstCtx.rsp;
6611 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6612 return pVCpu->cpum.GstCtx.esp;
6613 return pVCpu->cpum.GstCtx.sp;
6614}
6615
6616
6617/**
6618 * Updates the RIP/EIP/IP to point to the next instruction.
6619 *
6620 * This function leaves the EFLAGS.RF flag alone.
6621 *
6622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6623 * @param cbInstr The number of bytes to add.
6624 */
6625IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6626{
6627 switch (pVCpu->iem.s.enmCpuMode)
6628 {
6629 case IEMMODE_16BIT:
6630 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6631 pVCpu->cpum.GstCtx.eip += cbInstr;
6632 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6633 break;
6634
6635 case IEMMODE_32BIT:
6636 pVCpu->cpum.GstCtx.eip += cbInstr;
6637 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6638 break;
6639
6640 case IEMMODE_64BIT:
6641 pVCpu->cpum.GstCtx.rip += cbInstr;
6642 break;
6643 default: AssertFailed();
6644 }
6645}
6646
6647
6648#if 0
6649/**
6650 * Updates the RIP/EIP/IP to point to the next instruction.
6651 *
6652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6653 */
6654IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6655{
6656 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6657}
6658#endif
6659
6660
6661
6662/**
6663 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6664 *
6665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6666 * @param cbInstr The number of bytes to add.
6667 */
6668IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6669{
6670 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6671
6672 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6673#if ARCH_BITS >= 64
6674 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6675 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6676 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6677#else
6678 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6679 pVCpu->cpum.GstCtx.rip += cbInstr;
6680 else
6681 pVCpu->cpum.GstCtx.eip += cbInstr;
6682#endif
6683}
6684
6685
6686/**
6687 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6688 *
6689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6690 */
6691IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6692{
6693 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6694}
6695
6696
6697/**
6698 * Adds to the stack pointer.
6699 *
6700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6701 * @param cbToAdd The number of bytes to add (8-bit!).
6702 */
6703DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6704{
6705 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6706 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6707 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6708 pVCpu->cpum.GstCtx.esp += cbToAdd;
6709 else
6710 pVCpu->cpum.GstCtx.sp += cbToAdd;
6711}
6712
6713
6714/**
6715 * Subtracts from the stack pointer.
6716 *
6717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6718 * @param cbToSub The number of bytes to subtract (8-bit!).
6719 */
6720DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6721{
6722 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6723 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6724 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6725 pVCpu->cpum.GstCtx.esp -= cbToSub;
6726 else
6727 pVCpu->cpum.GstCtx.sp -= cbToSub;
6728}
6729
6730
6731/**
6732 * Adds to the temporary stack pointer.
6733 *
6734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6735 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6736 * @param cbToAdd The number of bytes to add (16-bit).
6737 */
6738DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6739{
6740 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6741 pTmpRsp->u += cbToAdd;
6742 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6743 pTmpRsp->DWords.dw0 += cbToAdd;
6744 else
6745 pTmpRsp->Words.w0 += cbToAdd;
6746}
6747
6748
6749/**
6750 * Subtracts from the temporary stack pointer.
6751 *
6752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6753 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6754 * @param cbToSub The number of bytes to subtract.
6755 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6756 * expecting that.
6757 */
6758DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6759{
6760 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6761 pTmpRsp->u -= cbToSub;
6762 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6763 pTmpRsp->DWords.dw0 -= cbToSub;
6764 else
6765 pTmpRsp->Words.w0 -= cbToSub;
6766}
6767
6768
6769/**
6770 * Calculates the effective stack address for a push of the specified size as
6771 * well as the new RSP value (upper bits may be masked).
6772 *
6773 * @returns Effective stack address for the push.
6774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6775 * @param cbItem The size of the stack item to push.
6776 * @param puNewRsp Where to return the new RSP value.
6777 */
6778DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6779{
6780 RTUINT64U uTmpRsp;
6781 RTGCPTR GCPtrTop;
6782 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6783
6784 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6785 GCPtrTop = uTmpRsp.u -= cbItem;
6786 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6787 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6788 else
6789 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6790 *puNewRsp = uTmpRsp.u;
6791 return GCPtrTop;
6792}
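/*
 * Illustrative sketch (not part of the original source): the two-step push
 * pattern this helper is designed for - compute the address and the would-be
 * RSP first, write the value, and only then commit the new RSP so a failed
 * memory access leaves the guest state untouched.  Fragment from a hypothetical
 * 16-bit push implementation; the actual memory store is elided.
 */
#if 0
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(uint16_t), &uNewRsp);
    /* ... write the 16-bit value to SS:GCPtrTop here; bail out on failure ... */
    pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* commit only after the write succeeded */
#endif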
6793
6794
6795/**
6796 * Gets the current stack pointer and calculates the value after a pop of the
6797 * specified size.
6798 *
6799 * @returns Current stack pointer.
6800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6801 * @param cbItem The size of the stack item to pop.
6802 * @param puNewRsp Where to return the new RSP value.
6803 */
6804DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6805{
6806 RTUINT64U uTmpRsp;
6807 RTGCPTR GCPtrTop;
6808 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6809
6810 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6811 {
6812 GCPtrTop = uTmpRsp.u;
6813 uTmpRsp.u += cbItem;
6814 }
6815 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6816 {
6817 GCPtrTop = uTmpRsp.DWords.dw0;
6818 uTmpRsp.DWords.dw0 += cbItem;
6819 }
6820 else
6821 {
6822 GCPtrTop = uTmpRsp.Words.w0;
6823 uTmpRsp.Words.w0 += cbItem;
6824 }
6825 *puNewRsp = uTmpRsp.u;
6826 return GCPtrTop;
6827}
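/*
 * Illustrative sketch (not part of the original source): the matching pop
 * pattern - this helper returns the current top-of-stack address to read from
 * and the RSP value to commit once the read has succeeded.  Fragment from a
 * hypothetical 32-bit pop implementation; the actual memory read is elided.
 */
#if 0
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(uint32_t), &uNewRsp);
    /* ... read the 32-bit value from SS:GCPtrTop here; bail out on failure ... */
    pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* commit the incremented stack pointer */
#endif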
6828
6829
6830/**
6831 * Calculates the effective stack address for a push of the specified size as
6832 * well as the new temporary RSP value (upper bits may be masked).
6833 *
6834 * @returns Effective stack address for the push.
6835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6836 * @param pTmpRsp The temporary stack pointer. This is updated.
6837 * @param cbItem The size of the stack item to push.
6838 */
6839DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6840{
6841 RTGCPTR GCPtrTop;
6842
6843 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6844 GCPtrTop = pTmpRsp->u -= cbItem;
6845 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6846 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6847 else
6848 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6849 return GCPtrTop;
6850}
6851
6852
6853/**
6854 * Gets the effective stack address for a pop of the specified size and
6855 * calculates and updates the temporary RSP.
6856 *
6857 * @returns Current stack pointer.
6858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6859 * @param pTmpRsp The temporary stack pointer. This is updated.
6860 * @param cbItem The size of the stack item to pop.
6861 */
6862DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6863{
6864 RTGCPTR GCPtrTop;
6865 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6866 {
6867 GCPtrTop = pTmpRsp->u;
6868 pTmpRsp->u += cbItem;
6869 }
6870 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6871 {
6872 GCPtrTop = pTmpRsp->DWords.dw0;
6873 pTmpRsp->DWords.dw0 += cbItem;
6874 }
6875 else
6876 {
6877 GCPtrTop = pTmpRsp->Words.w0;
6878 pTmpRsp->Words.w0 += cbItem;
6879 }
6880 return GCPtrTop;
6881}
6882
6883/** @} */
6884
6885
6886/** @name FPU access and helpers.
6887 *
6888 * @{
6889 */
6890
6891
6892/**
6893 * Hook for preparing to use the host FPU.
6894 *
6895 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6896 *
6897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6898 */
6899DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6900{
6901#ifdef IN_RING3
6902 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6903#else
6904 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6905#endif
6906 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6907}
6908
6909
6910/**
6911 * Hook for preparing to use the host FPU for SSE.
6912 *
6913 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6914 *
6915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6916 */
6917DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6918{
6919 iemFpuPrepareUsage(pVCpu);
6920}
6921
6922
6923/**
6924 * Hook for preparing to use the host FPU for AVX.
6925 *
6926 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6927 *
6928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6929 */
6930DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6931{
6932 iemFpuPrepareUsage(pVCpu);
6933}
6934
6935
6936/**
6937 * Hook for actualizing the guest FPU state before the interpreter reads it.
6938 *
6939 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6940 *
6941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6942 */
6943DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6944{
6945#ifdef IN_RING3
6946 NOREF(pVCpu);
6947#else
6948 CPUMRZFpuStateActualizeForRead(pVCpu);
6949#endif
6950 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6951}
6952
6953
6954/**
6955 * Hook for actualizing the guest FPU state before the interpreter changes it.
6956 *
6957 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6958 *
6959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6960 */
6961DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6962{
6963#ifdef IN_RING3
6964 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6965#else
6966 CPUMRZFpuStateActualizeForChange(pVCpu);
6967#endif
6968 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6969}
6970
6971
6972/**
6973 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6974 * only.
6975 *
6976 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6977 *
6978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6979 */
6980DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6981{
6982#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6983 NOREF(pVCpu);
6984#else
6985 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6986#endif
6987 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6988}
6989
6990
6991/**
6992 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6993 * read+write.
6994 *
6995 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6996 *
6997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6998 */
6999DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7000{
7001#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7002 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7003#else
7004 CPUMRZFpuStateActualizeForChange(pVCpu);
7005#endif
7006 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7007}
7008
7009
7010/**
7011 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7012 * only.
7013 *
7014 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7015 *
7016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7017 */
7018DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7019{
7020#ifdef IN_RING3
7021 NOREF(pVCpu);
7022#else
7023 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7024#endif
7025 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7026}
7027
7028
7029/**
7030 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7031 * read+write.
7032 *
7033 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7034 *
7035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7036 */
7037DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7038{
7039#ifdef IN_RING3
7040 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7041#else
7042 CPUMRZFpuStateActualizeForChange(pVCpu);
7043#endif
7044 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7045}
7046
7047
7048/**
7049 * Stores a QNaN value into a FPU register.
7050 *
7051 * @param pReg Pointer to the register.
7052 */
7053DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7054{
7055 pReg->au32[0] = UINT32_C(0x00000000);
7056 pReg->au32[1] = UINT32_C(0xc0000000);
7057 pReg->au16[4] = UINT16_C(0xffff);
7058}
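/*
 * Illustrative sketch (not part of the original source): the bit pattern stored
 * above matches the x87 "real indefinite" QNaN encoding, i.e. what the FPU
 * itself produces for masked invalid operations.  The helper name is
 * hypothetical.
 */
#if 0
static void iemExampleCheckIndefiniteQNan(void)
{
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    /* sign = 1, exponent = 0x7fff (all ones), mantissa = 0xC000000000000000:
       integer bit + quiet bit set, rest zero. */
    Assert(r80.au16[4] == UINT16_C(0xffff));
    Assert(r80.au32[1] == UINT32_C(0xc0000000) && r80.au32[0] == 0);
}
#endif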
7059
7060
7061/**
7062 * Updates the FOP, FPU.CS and FPUIP registers.
7063 *
7064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7065 * @param pFpuCtx The FPU context.
7066 */
7067DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7068{
7069 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7070 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7071 /** @todo x87.CS and FPUIP need to be kept separately. */
7072 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7073 {
7074 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7075 * happens in real mode here based on the fnsave and fnstenv images. */
7076 pFpuCtx->CS = 0;
7077 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7078 }
7079 else
7080 {
7081 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7082 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7083 }
7084}
7085
7086
7087/**
7088 * Updates the x87.DS and FPUDP registers.
7089 *
7090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7091 * @param pFpuCtx The FPU context.
7092 * @param iEffSeg The effective segment register.
7093 * @param GCPtrEff The effective address relative to @a iEffSeg.
7094 */
7095DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7096{
7097 RTSEL sel;
7098 switch (iEffSeg)
7099 {
7100 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7101 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7102 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7103 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7104 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7105 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7106 default:
7107 AssertMsgFailed(("%d\n", iEffSeg));
7108 sel = pVCpu->cpum.GstCtx.ds.Sel;
7109 }
7110 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7111 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7112 {
7113 pFpuCtx->DS = 0;
7114 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7115 }
7116 else
7117 {
7118 pFpuCtx->DS = sel;
7119 pFpuCtx->FPUDP = GCPtrEff;
7120 }
7121}
7122
7123
7124/**
7125 * Rotates the stack registers in the push direction.
7126 *
7127 * @param pFpuCtx The FPU context.
7128 * @remarks This is a complete waste of time, but fxsave stores the registers in
7129 * stack order.
7130 */
7131DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7132{
7133 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7134 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7135 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7136 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7137 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7138 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7139 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7140 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7141 pFpuCtx->aRegs[0].r80 = r80Tmp;
7142}
7143
7144
7145/**
7146 * Rotates the stack registers in the pop direction.
7147 *
7148 * @param pFpuCtx The FPU context.
7149 * @remarks This is a complete waste of time, but fxsave stores the registers in
7150 * stack order.
7151 */
7152DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7153{
7154 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7155 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7156 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7157 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7158 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7159 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7160 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7161 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7162 pFpuCtx->aRegs[7].r80 = r80Tmp;
7163}
7164
7165
7166/**
7167 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7168 * exception prevents it.
7169 *
7170 * @param pResult The FPU operation result to push.
7171 * @param pFpuCtx The FPU context.
7172 */
7173IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7174{
7175 /* Update FSW and bail if there are pending exceptions afterwards. */
7176 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7177 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7178 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7179 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7180 {
7181 pFpuCtx->FSW = fFsw;
7182 return;
7183 }
7184
7185 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7186 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7187 {
7188 /* All is fine, push the actual value. */
7189 pFpuCtx->FTW |= RT_BIT(iNewTop);
7190 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7191 }
7192 else if (pFpuCtx->FCW & X86_FCW_IM)
7193 {
7194 /* Masked stack overflow, push QNaN. */
7195 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7196 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7197 }
7198 else
7199 {
7200 /* Raise stack overflow, don't push anything. */
7201 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7202 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7203 return;
7204 }
7205
7206 fFsw &= ~X86_FSW_TOP_MASK;
7207 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7208 pFpuCtx->FSW = fFsw;
7209
7210 iemFpuRotateStackPush(pFpuCtx);
7211}
7212
7213
7214/**
7215 * Stores a result in a FPU register and updates the FSW and FTW.
7216 *
7217 * @param pFpuCtx The FPU context.
7218 * @param pResult The result to store.
7219 * @param iStReg Which FPU register to store it in.
7220 */
7221IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7222{
7223 Assert(iStReg < 8);
7224 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7225 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7226 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7227 pFpuCtx->FTW |= RT_BIT(iReg);
7228 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7229}
7230
7231
7232/**
7233 * Only updates the FPU status word (FSW) with the result of the current
7234 * instruction.
7235 *
7236 * @param pFpuCtx The FPU context.
7237 * @param u16FSW The FSW output of the current instruction.
7238 */
7239IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7240{
7241 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7242 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7243}
7244
7245
7246/**
7247 * Pops one item off the FPU stack if no pending exception prevents it.
7248 *
7249 * @param pFpuCtx The FPU context.
7250 */
7251IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7252{
7253 /* Check pending exceptions. */
7254 uint16_t uFSW = pFpuCtx->FSW;
7255 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7256 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7257 return;
7258
7259 /* TOP++, i.e. pop one item. */
7260 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7261 uFSW &= ~X86_FSW_TOP_MASK;
7262 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7263 pFpuCtx->FSW = uFSW;
7264
7265 /* Mark the previous ST0 as empty. */
7266 iOldTop >>= X86_FSW_TOP_SHIFT;
7267 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7268
7269 /* Rotate the registers. */
7270 iemFpuRotateStackPop(pFpuCtx);
7271}
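/*
 * Illustrative sketch (not part of the original source): the modular TOP
 * arithmetic used above and in the push path.  TOP is a 3-bit field, so
 * "+ 7" is a decrement (push) and "+ 1" (or "+ 9") is an increment (pop),
 * both modulo 8.  The helper name is hypothetical.
 */
#if 0
static void iemExampleTopArithmetic(void)
{
    uint16_t const iTop     = 2;                                /* example TOP value */
    uint16_t const iPushTop = (iTop + 7) & X86_FSW_TOP_SMASK;   /* == 1: push decrements TOP */
    uint16_t const iPopTop  = (iTop + 1) & X86_FSW_TOP_SMASK;   /* == 3: pop increments TOP  */
    Assert(iPushTop == 1 && iPopTop == 3);
}
#endif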
7272
7273
7274/**
7275 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7276 *
7277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7278 * @param pResult The FPU operation result to push.
7279 */
7280IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7281{
7282 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7283 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7284 iemFpuMaybePushResult(pResult, pFpuCtx);
7285}
7286
7287
7288/**
7289 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7290 * and sets FPUDP and FPUDS.
7291 *
7292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7293 * @param pResult The FPU operation result to push.
7294 * @param iEffSeg The effective segment register.
7295 * @param GCPtrEff The effective address relative to @a iEffSeg.
7296 */
7297IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7298{
7299 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7300 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7301 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7302 iemFpuMaybePushResult(pResult, pFpuCtx);
7303}
7304
7305
7306/**
7307 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7308 * unless a pending exception prevents it.
7309 *
7310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7311 * @param pResult The FPU operation result to store and push.
7312 */
7313IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7314{
7315 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7316 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7317
7318 /* Update FSW and bail if there are pending exceptions afterwards. */
7319 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7320 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7321 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7322 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7323 {
7324 pFpuCtx->FSW = fFsw;
7325 return;
7326 }
7327
7328 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7329 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7330 {
7331 /* All is fine, push the actual value. */
7332 pFpuCtx->FTW |= RT_BIT(iNewTop);
7333 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7334 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7335 }
7336 else if (pFpuCtx->FCW & X86_FCW_IM)
7337 {
7338 /* Masked stack overflow, push QNaN. */
7339 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7340 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7341 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7342 }
7343 else
7344 {
7345 /* Raise stack overflow, don't push anything. */
7346 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7347 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7348 return;
7349 }
7350
7351 fFsw &= ~X86_FSW_TOP_MASK;
7352 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7353 pFpuCtx->FSW = fFsw;
7354
7355 iemFpuRotateStackPush(pFpuCtx);
7356}
7357
7358
7359/**
7360 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7361 * FOP.
7362 *
7363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7364 * @param pResult The result to store.
7365 * @param iStReg Which FPU register to store it in.
7366 */
7367IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7368{
7369 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7370 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7371 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7372}
7373
7374
7375/**
7376 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7377 * FOP, and then pops the stack.
7378 *
7379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7380 * @param pResult The result to store.
7381 * @param iStReg Which FPU register to store it in.
7382 */
7383IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7384{
7385 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7386 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7387 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7388 iemFpuMaybePopOne(pFpuCtx);
7389}
7390
7391
7392/**
7393 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7394 * FPUDP, and FPUDS.
7395 *
7396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7397 * @param pResult The result to store.
7398 * @param iStReg Which FPU register to store it in.
7399 * @param iEffSeg The effective memory operand selector register.
7400 * @param GCPtrEff The effective memory operand offset.
7401 */
7402IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7403 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7404{
7405 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7406 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7407 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7408 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7409}
7410
7411
7412/**
7413 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7414 * FPUDP, and FPUDS, and then pops the stack.
7415 *
7416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7417 * @param pResult The result to store.
7418 * @param iStReg Which FPU register to store it in.
7419 * @param iEffSeg The effective memory operand selector register.
7420 * @param GCPtrEff The effective memory operand offset.
7421 */
7422IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7423 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7424{
7425 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7426 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7427 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7428 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7429 iemFpuMaybePopOne(pFpuCtx);
7430}
7431
7432
7433/**
7434 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7435 *
7436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7437 */
7438IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7439{
7440 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7441 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7442}
7443
7444
7445/**
7446 * Marks the specified stack register as free (for FFREE).
7447 *
7448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7449 * @param iStReg The register to free.
7450 */
7451IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7452{
7453 Assert(iStReg < 8);
7454 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7455 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7456 pFpuCtx->FTW &= ~RT_BIT(iReg);
7457}
7458
7459
7460/**
7461 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7462 *
7463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7464 */
7465IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7466{
7467 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7468 uint16_t uFsw = pFpuCtx->FSW;
7469 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7470 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7471 uFsw &= ~X86_FSW_TOP_MASK;
7472 uFsw |= uTop;
7473 pFpuCtx->FSW = uFsw;
7474}
7475
7476
7477/**
7478 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7479 *
7480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7481 */
7482IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7483{
7484 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7485 uint16_t uFsw = pFpuCtx->FSW;
7486 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7487 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7488 uFsw &= ~X86_FSW_TOP_MASK;
7489 uFsw |= uTop;
7490 pFpuCtx->FSW = uFsw;
7491}
7492
7493
7494/**
7495 * Updates the FSW, FOP, FPUIP, and FPUCS.
7496 *
7497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7498 * @param u16FSW The FSW from the current instruction.
7499 */
7500IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7501{
7502 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7503 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7504 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7505}
7506
7507
7508/**
7509 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7510 *
7511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7512 * @param u16FSW The FSW from the current instruction.
7513 */
7514IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7515{
7516 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7517 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7518 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7519 iemFpuMaybePopOne(pFpuCtx);
7520}
7521
7522
7523/**
7524 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7525 *
7526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7527 * @param u16FSW The FSW from the current instruction.
7528 * @param iEffSeg The effective memory operand selector register.
7529 * @param GCPtrEff The effective memory operand offset.
7530 */
7531IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7532{
7533 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7534 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7535 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7536 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7537}
7538
7539
7540/**
7541 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7542 *
7543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7544 * @param u16FSW The FSW from the current instruction.
7545 */
7546IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7547{
7548 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7549 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7550 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7551 iemFpuMaybePopOne(pFpuCtx);
7552 iemFpuMaybePopOne(pFpuCtx);
7553}
7554
7555
7556/**
7557 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7558 *
7559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7560 * @param u16FSW The FSW from the current instruction.
7561 * @param iEffSeg The effective memory operand selector register.
7562 * @param GCPtrEff The effective memory operand offset.
7563 */
7564IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7565{
7566 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7567 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7568 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7569 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7570 iemFpuMaybePopOne(pFpuCtx);
7571}
7572
7573
7574/**
7575 * Worker routine for raising an FPU stack underflow exception.
7576 *
7577 * @param pFpuCtx The FPU context.
7578 * @param iStReg The stack register being accessed.
7579 */
7580IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7581{
7582 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7583 if (pFpuCtx->FCW & X86_FCW_IM)
7584 {
7585 /* Masked underflow. */
7586 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7587 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7588 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7589 if (iStReg != UINT8_MAX)
7590 {
7591 pFpuCtx->FTW |= RT_BIT(iReg);
7592 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7593 }
7594 }
7595 else
7596 {
7597 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7598 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7599 }
7600}
7601
7602
7603/**
7604 * Raises a FPU stack underflow exception.
7605 *
7606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7607 * @param iStReg The destination register that should be loaded
7608 * with QNaN if \#IS is not masked. Specify
7609 * UINT8_MAX if none (like for fcom).
7610 */
7611DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7612{
7613 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7614 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7615 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7616}
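/*
 * Illustrative sketch (not part of the original source): how an instruction
 * implementation would typically report an empty source register.  An fcom
 * style instruction passes UINT8_MAX (no destination to fill with QNaN), while
 * an fld style instruction passes the destination stack register.  Fragment
 * from a hypothetical implementation; iemFpuStRegNotEmpty is defined further
 * down in this file.
 */
#if 0
    if (iemFpuStRegNotEmpty(pVCpu, 1) != VINF_SUCCESS)  /* is ST(1) empty? */
        iemFpuStackUnderflow(pVCpu, UINT8_MAX);         /* fcom style: raise/mask #IS, no QNaN target */
    /* an fld style instruction would instead do:
           iemFpuStackUnderflow(pVCpu, 0);              // ST(0) gets QNaN if #IS is masked */
#endif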
7617
7618
7619DECL_NO_INLINE(IEM_STATIC, void)
7620iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7621{
7622 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7623 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7624 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7625 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7626}
7627
7628
7629DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7630{
7631 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7632 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7633 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7634 iemFpuMaybePopOne(pFpuCtx);
7635}
7636
7637
7638DECL_NO_INLINE(IEM_STATIC, void)
7639iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7640{
7641 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7642 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7643 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7644 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7645 iemFpuMaybePopOne(pFpuCtx);
7646}
7647
7648
7649DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7650{
7651 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7652 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7653 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7654 iemFpuMaybePopOne(pFpuCtx);
7655 iemFpuMaybePopOne(pFpuCtx);
7656}
7657
7658
7659DECL_NO_INLINE(IEM_STATIC, void)
7660iemFpuStackPushUnderflow(PVMCPU pVCpu)
7661{
7662 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7663 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7664
7665 if (pFpuCtx->FCW & X86_FCW_IM)
7666 {
7667 /* Masked underflow - Push QNaN. */
7668 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7669 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7670 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7671 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7672 pFpuCtx->FTW |= RT_BIT(iNewTop);
7673 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7674 iemFpuRotateStackPush(pFpuCtx);
7675 }
7676 else
7677 {
7678 /* Exception pending - don't change TOP or the register stack. */
7679 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7680 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7681 }
7682}
7683
7684
7685DECL_NO_INLINE(IEM_STATIC, void)
7686iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7687{
7688 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7689 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7690
7691 if (pFpuCtx->FCW & X86_FCW_IM)
7692 {
7693 /* Masked underflow - Push QNaN. */
7694 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7695 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7696 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7697 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7698 pFpuCtx->FTW |= RT_BIT(iNewTop);
7699 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7700 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7701 iemFpuRotateStackPush(pFpuCtx);
7702 }
7703 else
7704 {
7705 /* Exception pending - don't change TOP or the register stack. */
7706 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7707 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7708 }
7709}
7710
7711
7712/**
7713 * Worker routine for raising an FPU stack overflow exception on a push.
7714 *
7715 * @param pFpuCtx The FPU context.
7716 */
7717IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7718{
7719 if (pFpuCtx->FCW & X86_FCW_IM)
7720 {
7721 /* Masked overflow. */
7722 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7723 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7724 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7725 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7726 pFpuCtx->FTW |= RT_BIT(iNewTop);
7727 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7728 iemFpuRotateStackPush(pFpuCtx);
7729 }
7730 else
7731 {
7732 /* Exception pending - don't change TOP or the register stack. */
7733 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7734 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7735 }
7736}
7737
7738
7739/**
7740 * Raises a FPU stack overflow exception on a push.
7741 *
7742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7743 */
7744DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7745{
7746 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7747 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7748 iemFpuStackPushOverflowOnly(pFpuCtx);
7749}
7750
7751
7752/**
7753 * Raises a FPU stack overflow exception on a push with a memory operand.
7754 *
7755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7756 * @param iEffSeg The effective memory operand selector register.
7757 * @param GCPtrEff The effective memory operand offset.
7758 */
7759DECL_NO_INLINE(IEM_STATIC, void)
7760iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7761{
7762 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7763 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7764 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7765 iemFpuStackPushOverflowOnly(pFpuCtx);
7766}
7767
7768
7769IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7770{
7771 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7772 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7773 if (pFpuCtx->FTW & RT_BIT(iReg))
7774 return VINF_SUCCESS;
7775 return VERR_NOT_FOUND;
7776}
7777
7778
7779IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7780{
7781 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7782 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7783 if (pFpuCtx->FTW & RT_BIT(iReg))
7784 {
7785 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7786 return VINF_SUCCESS;
7787 }
7788 return VERR_NOT_FOUND;
7789}
7790
7791
7792IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7793 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7794{
7795 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7796 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7797 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7798 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7799 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7800 {
7801 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7802 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7803 return VINF_SUCCESS;
7804 }
7805 return VERR_NOT_FOUND;
7806}
7807
7808
7809IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7810{
7811 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7812 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7813 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7814 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7815 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7816 {
7817 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7818 return VINF_SUCCESS;
7819 }
7820 return VERR_NOT_FOUND;
7821}
7822
7823
7824/**
7825 * Updates the FPU exception status after FCW is changed.
7826 *
7827 * @param pFpuCtx The FPU context.
7828 */
7829IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7830{
7831 uint16_t u16Fsw = pFpuCtx->FSW;
7832 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7833 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7834 else
7835 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7836 pFpuCtx->FSW = u16Fsw;
7837}
7838
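/*
 * A minimal, non-compiled sketch of how a control-word load might use the
 * recalculation above (hypothetical helper name; the real FLDCW/FLDENV code
 * lives in the instruction implementations):
 */
#if 0 /* illustrative sketch only */
IEM_STATIC void iemFpuExampleSetFcw(PX86FXSTATE pFpuCtx, uint16_t uNewFcw)
{
    pFpuCtx->FCW = uNewFcw;                 /* Install the new control word (new exception masks). */
    iemFpuRecalcExceptionStatus(pFpuCtx);   /* Re-derive FSW.ES and FSW.B from the unmasked exceptions. */
}
#endif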
7839
7840/**
7841 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7842 *
7843 * @returns The full FTW.
7844 * @param pFpuCtx The FPU context.
7845 */
7846IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7847{
7848 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7849 uint16_t u16Ftw = 0;
7850 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7851 for (unsigned iSt = 0; iSt < 8; iSt++)
7852 {
7853 unsigned const iReg = (iSt + iTop) & 7;
7854 if (!(u8Ftw & RT_BIT(iReg)))
7855 u16Ftw |= 3 << (iReg * 2); /* empty */
7856 else
7857 {
7858 uint16_t uTag;
7859 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7860 if (pr80Reg->s.uExponent == 0x7fff)
7861 uTag = 2; /* Exponent is all 1's => Special. */
7862 else if (pr80Reg->s.uExponent == 0x0000)
7863 {
7864 if (pr80Reg->s.u64Mantissa == 0x0000)
7865 uTag = 1; /* All bits are zero => Zero. */
7866 else
7867 uTag = 2; /* Must be special. */
7868 }
7869 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7870 uTag = 0; /* Valid. */
7871 else
7872 uTag = 2; /* Must be special. */
7873
7874            u16Ftw |= uTag << (iReg * 2);
7875 }
7876 }
7877
7878 return u16Ftw;
7879}
7880
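/*
 * A minimal, non-compiled sketch showing how the 2-bit tag for ST(iSt) can be
 * extracted from the full FTW produced above (hypothetical helper name):
 */
#if 0 /* illustrative sketch only */
IEM_STATIC unsigned iemFpuExampleGetTag(uint16_t u16FullFtw, uint16_t u16Fsw, unsigned iSt)
{
    unsigned const iReg = (iSt + X86_FSW_TOP_GET(u16Fsw)) & 7;  /* Physical register slot backing ST(iSt). */
    return (u16FullFtw >> (iReg * 2)) & 3;                      /* 0=valid, 1=zero, 2=special, 3=empty. */
}
#endif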
7881
7882/**
7883 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7884 *
7885 * @returns The compressed FTW.
7886 * @param u16FullFtw The full FTW to convert.
7887 */
7888IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7889{
7890 uint8_t u8Ftw = 0;
7891 for (unsigned i = 0; i < 8; i++)
7892 {
7893 if ((u16FullFtw & 3) != 3 /*empty*/)
7894 u8Ftw |= RT_BIT(i);
7895 u16FullFtw >>= 2;
7896 }
7897
7898 return u8Ftw;
7899}
7900
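/*
 * A minimal, non-compiled sketch of the relationship between the two FTW
 * forms: compressing the full tag word recovers the abridged one kept in the
 * FXSAVE image (hypothetical helper name):
 */
#if 0 /* illustrative sketch only */
IEM_STATIC void iemFpuExampleFtwRoundTrip(PCX86FXSTATE pFpuCtx)
{
    uint16_t const u16Full     = iemFpuCalcFullFtw(pFpuCtx);    /* 2 bits per register (FNSTENV/FNSAVE form). */
    uint16_t const u16Abridged = iemFpuCompressFtw(u16Full);    /* 1 bit per register (FXSAVE form). */
    Assert(u16Abridged == (uint8_t)pFpuCtx->FTW);               /* Both encode the same empty/not-empty info. */
}
#endif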
7901/** @} */
7902
7903
7904/** @name Memory access.
7905 *
7906 * @{
7907 */
7908
7909
7910/**
7911 * Updates the IEMCPU::cbWritten counter if applicable.
7912 *
7913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7914 * @param fAccess The access being accounted for.
7915 * @param cbMem The access size.
7916 */
7917DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7918{
7919 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7920 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7921 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7922}
7923
7924
7925/**
7926 * Checks if the given segment can be written to, raising the appropriate
7927 * exception if not.
7928 *
7929 * @returns VBox strict status code.
7930 *
7931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7932 * @param pHid Pointer to the hidden register.
7933 * @param iSegReg The register number.
7934 * @param pu64BaseAddr Where to return the base address to use for the
7935 * segment. (In 64-bit code it may differ from the
7936 * base in the hidden segment.)
7937 */
7938IEM_STATIC VBOXSTRICTRC
7939iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7940{
7941 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7942
7943 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7944 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7945 else
7946 {
7947 if (!pHid->Attr.n.u1Present)
7948 {
7949 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7950 AssertRelease(uSel == 0);
7951 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7952 return iemRaiseGeneralProtectionFault0(pVCpu);
7953 }
7954
7955 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7956 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7957 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7958 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7959 *pu64BaseAddr = pHid->u64Base;
7960 }
7961 return VINF_SUCCESS;
7962}
7963
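/*
 * A minimal, non-compiled sketch of a typical caller: validating ES as a
 * write destination and fetching the base address to use (hypothetical
 * helper name, for illustration only):
 */
#if 0 /* illustrative sketch only */
IEM_STATIC VBOXSTRICTRC iemExampleCheckEsWritable(PVMCPU pVCpu, uint64_t *puEsBase)
{
    /* Raises #GP/#SS as appropriate and returns the base to add to offsets. */
    return iemMemSegCheckWriteAccessEx(pVCpu, iemSRegGetHid(pVCpu, X86_SREG_ES), X86_SREG_ES, puEsBase);
}
#endif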
7964
7965/**
7966 * Checks if the given segment can be read from, raising the appropriate
7967 * exception if not.
7968 *
7969 * @returns VBox strict status code.
7970 *
7971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7972 * @param pHid Pointer to the hidden register.
7973 * @param iSegReg The register number.
7974 * @param pu64BaseAddr Where to return the base address to use for the
7975 * segment. (In 64-bit code it may differ from the
7976 * base in the hidden segment.)
7977 */
7978IEM_STATIC VBOXSTRICTRC
7979iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7980{
7981 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7982
7983 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7984 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7985 else
7986 {
7987 if (!pHid->Attr.n.u1Present)
7988 {
7989 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7990 AssertRelease(uSel == 0);
7991 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7992 return iemRaiseGeneralProtectionFault0(pVCpu);
7993 }
7994
7995 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7996 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7997 *pu64BaseAddr = pHid->u64Base;
7998 }
7999 return VINF_SUCCESS;
8000}
8001
8002
8003/**
8004 * Applies the segment limit, base and attributes.
8005 *
8006 * This may raise a \#GP or \#SS.
8007 *
8008 * @returns VBox strict status code.
8009 *
8010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8011 * @param fAccess The kind of access which is being performed.
8012 * @param iSegReg The index of the segment register to apply.
8013 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8014 * TSS, ++).
8015 * @param cbMem The access size.
8016 * @param pGCPtrMem Pointer to the guest memory address to apply
8017 * segmentation to. Input and output parameter.
8018 */
8019IEM_STATIC VBOXSTRICTRC
8020iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8021{
8022 if (iSegReg == UINT8_MAX)
8023 return VINF_SUCCESS;
8024
8025 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8026 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8027 switch (pVCpu->iem.s.enmCpuMode)
8028 {
8029 case IEMMODE_16BIT:
8030 case IEMMODE_32BIT:
8031 {
8032 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8033 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8034
8035 if ( pSel->Attr.n.u1Present
8036 && !pSel->Attr.n.u1Unusable)
8037 {
8038 Assert(pSel->Attr.n.u1DescType);
8039 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8040 {
8041 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8042 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8043 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8044
8045 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8046 {
8047 /** @todo CPL check. */
8048 }
8049
8050 /*
8051 * There are two kinds of data selectors, normal and expand down.
8052 */
8053 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8054 {
8055 if ( GCPtrFirst32 > pSel->u32Limit
8056 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8057 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8058 }
8059 else
8060 {
8061 /*
8062 * The upper boundary is defined by the B bit, not the G bit!
8063 */
8064 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8065 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8066 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8067 }
8068 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8069 }
8070 else
8071 {
8072
8073 /*
8074                 * A code selector can usually be used to read through; writing is
8075                 * only permitted in real and V8086 mode.
8076 */
8077 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8078 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8079 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8080 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8081 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8082
8083 if ( GCPtrFirst32 > pSel->u32Limit
8084 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8085 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8086
8087 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8088 {
8089 /** @todo CPL check. */
8090 }
8091
8092 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8093 }
8094 }
8095 else
8096 return iemRaiseGeneralProtectionFault0(pVCpu);
8097 return VINF_SUCCESS;
8098 }
8099
8100 case IEMMODE_64BIT:
8101 {
8102 RTGCPTR GCPtrMem = *pGCPtrMem;
8103 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8104 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8105
8106 Assert(cbMem >= 1);
8107 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8108 return VINF_SUCCESS;
8109 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8110 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8111 return iemRaiseGeneralProtectionFault0(pVCpu);
8112 }
8113
8114 default:
8115 AssertFailedReturn(VERR_IEM_IPE_7);
8116 }
8117}
8118
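/*
 * A minimal, non-compiled sketch of the two-stage address pipeline that
 * iemMemMap further down is built around: segmentation first, then paging
 * (hypothetical helper name, for illustration only):
 */
#if 0 /* illustrative sketch only */
IEM_STATIC VBOXSTRICTRC iemExampleSegmentedToPhys(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg,
                                                  size_t cbMem, RTGCPTR GCPtrMem, PRTGCPHYS pGCPhysMem)
{
    /* 1. Segmentation: limit and attribute checks; adds the segment base to the offset. */
    VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* 2. Paging: walks the guest page tables and checks the R/W, U/S and NX permissions. */
    return iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, pGCPhysMem);
}
#endif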
8119
8120/**
8121 * Translates a virtual address to a physical address and checks if we
8122 * can access the page as specified.
8123 *
8124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8125 * @param GCPtrMem The virtual address.
8126 * @param fAccess The intended access.
8127 * @param pGCPhysMem Where to return the physical address.
8128 */
8129IEM_STATIC VBOXSTRICTRC
8130iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8131{
8132 /** @todo Need a different PGM interface here. We're currently using
8133 * generic / REM interfaces. this won't cut it for R0 & RC. */
8134 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8135 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8136 RTGCPHYS GCPhys;
8137 uint64_t fFlags;
8138 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8139 if (RT_FAILURE(rc))
8140 {
8141 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8142 /** @todo Check unassigned memory in unpaged mode. */
8143 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8144 *pGCPhysMem = NIL_RTGCPHYS;
8145 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8146 }
8147
8148 /* If the page is writable and does not have the no-exec bit set, all
8149 access is allowed. Otherwise we'll have to check more carefully... */
8150 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8151 {
8152 /* Write to read only memory? */
8153 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8154 && !(fFlags & X86_PTE_RW)
8155 && ( (pVCpu->iem.s.uCpl == 3
8156 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8157 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8158 {
8159 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8160 *pGCPhysMem = NIL_RTGCPHYS;
8161 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8162 }
8163
8164 /* Kernel memory accessed by userland? */
8165 if ( !(fFlags & X86_PTE_US)
8166 && pVCpu->iem.s.uCpl == 3
8167 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8168 {
8169 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8170 *pGCPhysMem = NIL_RTGCPHYS;
8171 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8172 }
8173
8174 /* Executing non-executable memory? */
8175 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8176 && (fFlags & X86_PTE_PAE_NX)
8177 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8178 {
8179 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8180 *pGCPhysMem = NIL_RTGCPHYS;
8181 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8182 VERR_ACCESS_DENIED);
8183 }
8184 }
8185
8186 /*
8187 * Set the dirty / access flags.
8188     * ASSUMES this is set when the address is translated rather than on commit...
8189 */
8190 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8191 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8192 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8193 {
8194 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8195 AssertRC(rc2);
8196 }
8197
8198 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8199 *pGCPhysMem = GCPhys;
8200 return VINF_SUCCESS;
8201}
8202
8203
8204
8205/**
8206 * Maps a physical page.
8207 *
8208 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8210 * @param GCPhysMem The physical address.
8211 * @param fAccess The intended access.
8212 * @param ppvMem Where to return the mapping address.
8213 * @param pLock The PGM lock.
8214 */
8215IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8216{
8217#ifdef IEM_LOG_MEMORY_WRITES
8218 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8219 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8220#endif
8221
8222 /** @todo This API may require some improving later. A private deal with PGM
8223     * regarding locking and unlocking needs to be struck. A couple of TLBs
8224 * living in PGM, but with publicly accessible inlined access methods
8225 * could perhaps be an even better solution. */
8226 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8227 GCPhysMem,
8228 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8229 pVCpu->iem.s.fBypassHandlers,
8230 ppvMem,
8231 pLock);
8232 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8233 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8234
8235 return rc;
8236}
8237
8238
8239/**
8240 * Unmap a page previously mapped by iemMemPageMap.
8241 *
8242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8243 * @param GCPhysMem The physical address.
8244 * @param fAccess The intended access.
8245 * @param pvMem What iemMemPageMap returned.
8246 * @param pLock The PGM lock.
8247 */
8248DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8249{
8250 NOREF(pVCpu);
8251 NOREF(GCPhysMem);
8252 NOREF(fAccess);
8253 NOREF(pvMem);
8254 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8255}
8256
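/*
 * A minimal, non-compiled sketch of the map / access / unmap pairing for a
 * single physical page (hypothetical helper name, for illustration only):
 */
#if 0 /* illustrative sketch only */
IEM_STATIC int iemExampleReadPhysByte(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint8_t *pbValue)
{
    PGMPAGEMAPLOCK Lock;
    void          *pvMem;
    int rc = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, &pvMem, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pvMem;                                   /* Use the mapping. */
        iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, pvMem, &Lock);   /* Release the PGM page lock. */
    }
    return rc;
}
#endif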
8257
8258/**
8259 * Looks up a memory mapping entry.
8260 *
8261 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8263 * @param pvMem The memory address.
8264 * @param fAccess The kind of access.
8265 */
8266DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8267{
8268 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8269 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8270 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8271 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8272 return 0;
8273 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8274 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8275 return 1;
8276 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8277 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8278 return 2;
8279 return VERR_NOT_FOUND;
8280}
8281
8282
8283/**
8284 * Finds a free memmap entry when the iNextMapping hint doesn't work.
8285 *
8286 * @returns Memory mapping index, 1024 on failure.
8287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8288 */
8289IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8290{
8291 /*
8292 * The easy case.
8293 */
8294 if (pVCpu->iem.s.cActiveMappings == 0)
8295 {
8296 pVCpu->iem.s.iNextMapping = 1;
8297 return 0;
8298 }
8299
8300 /* There should be enough mappings for all instructions. */
8301 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8302
8303 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8304 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8305 return i;
8306
8307 AssertFailedReturn(1024);
8308}
8309
8310
8311/**
8312 * Commits a bounce buffer that needs writing back and unmaps it.
8313 *
8314 * @returns Strict VBox status code.
8315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8316 * @param iMemMap The index of the buffer to commit.
8317 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8318 * Always false in ring-3, obviously.
8319 */
8320IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8321{
8322 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8323 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8324#ifdef IN_RING3
8325 Assert(!fPostponeFail);
8326 RT_NOREF_PV(fPostponeFail);
8327#endif
8328
8329 /*
8330 * Do the writing.
8331 */
8332 PVM pVM = pVCpu->CTX_SUFF(pVM);
8333 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8334 {
8335 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8336 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8337 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8338 if (!pVCpu->iem.s.fBypassHandlers)
8339 {
8340 /*
8341 * Carefully and efficiently dealing with access handler return
8342             * codes makes this a little bloated.
8343 */
8344 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8345 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8346 pbBuf,
8347 cbFirst,
8348 PGMACCESSORIGIN_IEM);
8349 if (rcStrict == VINF_SUCCESS)
8350 {
8351 if (cbSecond)
8352 {
8353 rcStrict = PGMPhysWrite(pVM,
8354 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8355 pbBuf + cbFirst,
8356 cbSecond,
8357 PGMACCESSORIGIN_IEM);
8358 if (rcStrict == VINF_SUCCESS)
8359 { /* nothing */ }
8360 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8361 {
8362 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8363 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8364 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8365 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8366 }
8367#ifndef IN_RING3
8368 else if (fPostponeFail)
8369 {
8370 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8371 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8372 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8373 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8374 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8375 return iemSetPassUpStatus(pVCpu, rcStrict);
8376 }
8377#endif
8378 else
8379 {
8380 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8382 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8383 return rcStrict;
8384 }
8385 }
8386 }
8387 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8388 {
8389 if (!cbSecond)
8390 {
8391 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8393 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8394 }
8395 else
8396 {
8397 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8398 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8399 pbBuf + cbFirst,
8400 cbSecond,
8401 PGMACCESSORIGIN_IEM);
8402 if (rcStrict2 == VINF_SUCCESS)
8403 {
8404 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8405 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8406 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8407 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8408 }
8409 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8410 {
8411 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8412 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8413 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8414 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8415 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8416 }
8417#ifndef IN_RING3
8418 else if (fPostponeFail)
8419 {
8420 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8421 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8422 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8423 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8424 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8425 return iemSetPassUpStatus(pVCpu, rcStrict);
8426 }
8427#endif
8428 else
8429 {
8430 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8432 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8433 return rcStrict2;
8434 }
8435 }
8436 }
8437#ifndef IN_RING3
8438 else if (fPostponeFail)
8439 {
8440 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8441 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8442 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8443 if (!cbSecond)
8444 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8445 else
8446 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8447 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8448 return iemSetPassUpStatus(pVCpu, rcStrict);
8449 }
8450#endif
8451 else
8452 {
8453 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8455 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8456 return rcStrict;
8457 }
8458 }
8459 else
8460 {
8461 /*
8462 * No access handlers, much simpler.
8463 */
8464 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8465 if (RT_SUCCESS(rc))
8466 {
8467 if (cbSecond)
8468 {
8469 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8470 if (RT_SUCCESS(rc))
8471 { /* likely */ }
8472 else
8473 {
8474 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8475 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8476 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8477 return rc;
8478 }
8479 }
8480 }
8481 else
8482 {
8483 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8485 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8486 return rc;
8487 }
8488 }
8489 }
8490
8491#if defined(IEM_LOG_MEMORY_WRITES)
8492 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8493 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8494 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8495 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8496 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8497 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8498
8499 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8500 g_cbIemWrote = cbWrote;
8501 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8502#endif
8503
8504 /*
8505 * Free the mapping entry.
8506 */
8507 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8508 Assert(pVCpu->iem.s.cActiveMappings != 0);
8509 pVCpu->iem.s.cActiveMappings--;
8510 return VINF_SUCCESS;
8511}
8512
8513
8514/**
8515 * iemMemMap worker that deals with a request crossing pages.
8516 */
8517IEM_STATIC VBOXSTRICTRC
8518iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8519{
8520 /*
8521 * Do the address translations.
8522 */
8523 RTGCPHYS GCPhysFirst;
8524 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8525 if (rcStrict != VINF_SUCCESS)
8526 return rcStrict;
8527
8528 RTGCPHYS GCPhysSecond;
8529 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8530 fAccess, &GCPhysSecond);
8531 if (rcStrict != VINF_SUCCESS)
8532 return rcStrict;
8533 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8534
8535 PVM pVM = pVCpu->CTX_SUFF(pVM);
8536
8537 /*
8538 * Read in the current memory content if it's a read, execute or partial
8539 * write access.
8540 */
8541 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8542 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8543 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8544
8545 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8546 {
8547 if (!pVCpu->iem.s.fBypassHandlers)
8548 {
8549 /*
8550 * Must carefully deal with access handler status codes here,
8551 * makes the code a bit bloated.
8552 */
8553 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8554 if (rcStrict == VINF_SUCCESS)
8555 {
8556 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8557 if (rcStrict == VINF_SUCCESS)
8558 { /*likely */ }
8559 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8560 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8561 else
8562 {
8563 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8564 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8565 return rcStrict;
8566 }
8567 }
8568 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8569 {
8570 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8571 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8572 {
8573 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8574 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8575 }
8576 else
8577 {
8578 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8579                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8580 return rcStrict2;
8581 }
8582 }
8583 else
8584 {
8585 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8586 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8587 return rcStrict;
8588 }
8589 }
8590 else
8591 {
8592 /*
8593             * No informational status codes here, much more straightforward.
8594 */
8595 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8596 if (RT_SUCCESS(rc))
8597 {
8598 Assert(rc == VINF_SUCCESS);
8599 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8600 if (RT_SUCCESS(rc))
8601 Assert(rc == VINF_SUCCESS);
8602 else
8603 {
8604 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8605 return rc;
8606 }
8607 }
8608 else
8609 {
8610 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8611 return rc;
8612 }
8613 }
8614 }
8615#ifdef VBOX_STRICT
8616 else
8617 memset(pbBuf, 0xcc, cbMem);
8618 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8619 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8620#endif
8621
8622 /*
8623 * Commit the bounce buffer entry.
8624 */
8625 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8626 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8627 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8628 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8629 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8630 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8631 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8632 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8633 pVCpu->iem.s.cActiveMappings++;
8634
8635 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8636 *ppvMem = pbBuf;
8637 return VINF_SUCCESS;
8638}
8639
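/*
 * A minimal, non-compiled sketch of the page-split arithmetic used above: a
 * 4 byte access at page offset 0xffe leaves 2 bytes on the first page and
 * 2 bytes on the second (hypothetical values, for illustration only):
 */
#if 0 /* illustrative sketch only */
IEM_STATIC void iemExampleCrossPageSplit(void)
{
    RTGCPTR  const GCPtrFirst   = UINT64_C(0x00001ffe);                                     /* hypothetical address */
    size_t   const cbMem        = 4;                                                        /* dword access */
    uint32_t const cbFirstPage  = PAGE_SIZE - (uint32_t)(GCPtrFirst & PAGE_OFFSET_MASK);    /* = 2 */
    uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);                          /* = 2 */
    Assert(cbFirstPage == 2 && cbSecondPage == 2);
}
#endif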
8640
8641/**
8642 * iemMemMap worker that deals with iemMemPageMap failures.
8643 */
8644IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8645 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8646{
8647 /*
8648 * Filter out conditions we can handle and the ones which shouldn't happen.
8649 */
8650 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8651 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8652 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8653 {
8654 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8655 return rcMap;
8656 }
8657 pVCpu->iem.s.cPotentialExits++;
8658
8659 /*
8660 * Read in the current memory content if it's a read, execute or partial
8661 * write access.
8662 */
8663 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8664 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8665 {
8666 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8667 memset(pbBuf, 0xff, cbMem);
8668 else
8669 {
8670 int rc;
8671 if (!pVCpu->iem.s.fBypassHandlers)
8672 {
8673 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8674 if (rcStrict == VINF_SUCCESS)
8675 { /* nothing */ }
8676 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8677 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8678 else
8679 {
8680 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8681 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8682 return rcStrict;
8683 }
8684 }
8685 else
8686 {
8687 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8688 if (RT_SUCCESS(rc))
8689 { /* likely */ }
8690 else
8691 {
8692 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8693 GCPhysFirst, rc));
8694 return rc;
8695 }
8696 }
8697 }
8698 }
8699#ifdef VBOX_STRICT
8700 else
8701 memset(pbBuf, 0xcc, cbMem);
8702#endif
8703#ifdef VBOX_STRICT
8704 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8705 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8706#endif
8707
8708 /*
8709 * Commit the bounce buffer entry.
8710 */
8711 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8712 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8713 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8714 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8715 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8716 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8717 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8718 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8719 pVCpu->iem.s.cActiveMappings++;
8720
8721 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8722 *ppvMem = pbBuf;
8723 return VINF_SUCCESS;
8724}
8725
8726
8727
8728/**
8729 * Maps the specified guest memory for the given kind of access.
8730 *
8731 * This may be using bounce buffering of the memory if it's crossing a page
8732 * boundary or if there is an access handler installed for any of it. Because
8733 * of lock prefix guarantees, we're in for some extra clutter when this
8734 * happens.
8735 *
8736 * This may raise a \#GP, \#SS, \#PF or \#AC.
8737 *
8738 * @returns VBox strict status code.
8739 *
8740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8741 * @param ppvMem Where to return the pointer to the mapped
8742 * memory.
8743 * @param cbMem The number of bytes to map. This is usually 1,
8744 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8745 * string operations it can be up to a page.
8746 * @param iSegReg The index of the segment register to use for
8747 * this access. The base and limits are checked.
8748 * Use UINT8_MAX to indicate that no segmentation
8749 * is required (for IDT, GDT and LDT accesses).
8750 * @param GCPtrMem The address of the guest memory.
8751 * @param fAccess How the memory is being accessed. The
8752 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8753 * how to map the memory, while the
8754 * IEM_ACCESS_WHAT_XXX bit is used when raising
8755 * exceptions.
8756 */
8757IEM_STATIC VBOXSTRICTRC
8758iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8759{
8760 /*
8761 * Check the input and figure out which mapping entry to use.
8762 */
8763 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8764    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8765 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8766
8767 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8768 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8769 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8770 {
8771 iMemMap = iemMemMapFindFree(pVCpu);
8772 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8773 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8774 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8775 pVCpu->iem.s.aMemMappings[2].fAccess),
8776 VERR_IEM_IPE_9);
8777 }
8778
8779 /*
8780 * Map the memory, checking that we can actually access it. If something
8781 * slightly complicated happens, fall back on bounce buffering.
8782 */
8783 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8784 if (rcStrict != VINF_SUCCESS)
8785 return rcStrict;
8786
8787 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8788 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8789
8790 RTGCPHYS GCPhysFirst;
8791 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8792 if (rcStrict != VINF_SUCCESS)
8793 return rcStrict;
8794
8795 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8796 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8797 if (fAccess & IEM_ACCESS_TYPE_READ)
8798 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8799
8800 void *pvMem;
8801 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8802 if (rcStrict != VINF_SUCCESS)
8803 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8804
8805 /*
8806 * Fill in the mapping table entry.
8807 */
8808 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8809 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8810 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8811 pVCpu->iem.s.cActiveMappings++;
8812
8813 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8814 *ppvMem = pvMem;
8815 return VINF_SUCCESS;
8816}
8817
8818
8819/**
8820 * Commits the guest memory if bounce buffered and unmaps it.
8821 *
8822 * @returns Strict VBox status code.
8823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8824 * @param pvMem The mapping.
8825 * @param fAccess The kind of access.
8826 */
8827IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8828{
8829 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8830 AssertReturn(iMemMap >= 0, iMemMap);
8831
8832 /* If it's bounce buffered, we may need to write back the buffer. */
8833 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8834 {
8835 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8836 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8837 }
8838 /* Otherwise unlock it. */
8839 else
8840 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8841
8842 /* Free the entry. */
8843 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8844 Assert(pVCpu->iem.s.cActiveMappings != 0);
8845 pVCpu->iem.s.cActiveMappings--;
8846 return VINF_SUCCESS;
8847}
8848
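/*
 * A minimal, non-compiled sketch of the canonical caller pattern - map,
 * access, commit-and-unmap - which the data fetch/store helpers further down
 * follow (hypothetical helper name, for illustration only):
 */
#if 0 /* illustrative sketch only */
IEM_STATIC VBOXSTRICTRC iemExampleStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
{
    uint8_t     *pu8Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu8Dst = u8Value;                                              /* The actual guest memory access. */
        rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);    /* Writes back bounce buffers if needed. */
    }
    return rc;
}
#endif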
8849#ifdef IEM_WITH_SETJMP
8850
8851/**
8852 * Maps the specified guest memory for the given kind of access, longjmp on
8853 * error.
8854 *
8855 * This may be using bounce buffering of the memory if it's crossing a page
8856 * boundary or if there is an access handler installed for any of it. Because
8857 * of lock prefix guarantees, we're in for some extra clutter when this
8858 * happens.
8859 *
8860 * This may raise a \#GP, \#SS, \#PF or \#AC.
8861 *
8862 * @returns Pointer to the mapped memory.
8863 *
8864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8865 * @param cbMem The number of bytes to map. This is usually 1,
8866 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8867 * string operations it can be up to a page.
8868 * @param iSegReg The index of the segment register to use for
8869 * this access. The base and limits are checked.
8870 * Use UINT8_MAX to indicate that no segmentation
8871 * is required (for IDT, GDT and LDT accesses).
8872 * @param GCPtrMem The address of the guest memory.
8873 * @param fAccess How the memory is being accessed. The
8874 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8875 * how to map the memory, while the
8876 * IEM_ACCESS_WHAT_XXX bit is used when raising
8877 * exceptions.
8878 */
8879IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8880{
8881 /*
8882 * Check the input and figure out which mapping entry to use.
8883 */
8884 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8885    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8886 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8887
8888 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8889 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8890 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8891 {
8892 iMemMap = iemMemMapFindFree(pVCpu);
8893 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8894 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8895 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8896 pVCpu->iem.s.aMemMappings[2].fAccess),
8897 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8898 }
8899
8900 /*
8901 * Map the memory, checking that we can actually access it. If something
8902 * slightly complicated happens, fall back on bounce buffering.
8903 */
8904 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8905 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8906 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8907
8908 /* Crossing a page boundary? */
8909 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8910 { /* No (likely). */ }
8911 else
8912 {
8913 void *pvMem;
8914 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8915 if (rcStrict == VINF_SUCCESS)
8916 return pvMem;
8917 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8918 }
8919
8920 RTGCPHYS GCPhysFirst;
8921 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8922 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8923 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8924
8925 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8926 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8927 if (fAccess & IEM_ACCESS_TYPE_READ)
8928 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8929
8930 void *pvMem;
8931 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8932 if (rcStrict == VINF_SUCCESS)
8933 { /* likely */ }
8934 else
8935 {
8936 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8937 if (rcStrict == VINF_SUCCESS)
8938 return pvMem;
8939 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8940 }
8941
8942 /*
8943 * Fill in the mapping table entry.
8944 */
8945 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8946 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8947 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8948 pVCpu->iem.s.cActiveMappings++;
8949
8950 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8951 return pvMem;
8952}
8953
8954
8955/**
8956 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8957 *
8958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8959 * @param pvMem The mapping.
8960 * @param fAccess The kind of access.
8961 */
8962IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8963{
8964 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8965 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8966
8967 /* If it's bounce buffered, we may need to write back the buffer. */
8968 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8969 {
8970 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8971 {
8972 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8973 if (rcStrict == VINF_SUCCESS)
8974 return;
8975 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8976 }
8977 }
8978 /* Otherwise unlock it. */
8979 else
8980 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8981
8982 /* Free the entry. */
8983 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8984 Assert(pVCpu->iem.s.cActiveMappings != 0);
8985 pVCpu->iem.s.cActiveMappings--;
8986}
8987
8988#endif /* IEM_WITH_SETJMP */
8989
8990#ifndef IN_RING3
8991/**
8992 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8993 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
8994 *
8995 * Allows the instruction to be completed and retired, while the IEM user will
8996 * return to ring-3 immediately afterwards and do the postponed writes there.
8997 *
8998 * @returns VBox status code (no strict statuses). Caller must check
8999 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9001 * @param pvMem The mapping.
9002 * @param fAccess The kind of access.
9003 */
9004IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9005{
9006 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9007 AssertReturn(iMemMap >= 0, iMemMap);
9008
9009 /* If it's bounce buffered, we may need to write back the buffer. */
9010 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9011 {
9012 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9013 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9014 }
9015 /* Otherwise unlock it. */
9016 else
9017 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9018
9019 /* Free the entry. */
9020 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9021 Assert(pVCpu->iem.s.cActiveMappings != 0);
9022 pVCpu->iem.s.cActiveMappings--;
9023 return VINF_SUCCESS;
9024}
9025#endif
9026
9027
9028/**
9029 * Rolls back mappings, releasing page locks and such.
9030 *
9031 * The caller shall only call this after checking cActiveMappings.
9032 *
9034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9035 */
9036IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9037{
9038 Assert(pVCpu->iem.s.cActiveMappings > 0);
9039
9040 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9041 while (iMemMap-- > 0)
9042 {
9043 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9044 if (fAccess != IEM_ACCESS_INVALID)
9045 {
9046 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9047 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9048 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9049 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9050 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9051 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9052 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9053 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9054 pVCpu->iem.s.cActiveMappings--;
9055 }
9056 }
9057}
9058
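/*
 * A minimal, non-compiled sketch of how an outer execution path might use the
 * rollback helper after a failed instruction (hypothetical helper name):
 */
#if 0 /* illustrative sketch only */
IEM_STATIC void iemExampleCleanupAfterFailure(PVMCPU pVCpu)
{
    if (pVCpu->iem.s.cActiveMappings > 0)   /* Only call after checking, as the doc comment above requires. */
        iemMemRollback(pVCpu);              /* Drops the mappings and releases any page locks held. */
}
#endif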
9059
9060/**
9061 * Fetches a data byte.
9062 *
9063 * @returns Strict VBox status code.
9064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9065 * @param pu8Dst Where to return the byte.
9066 * @param iSegReg The index of the segment register to use for
9067 * this access. The base and limits are checked.
9068 * @param GCPtrMem The address of the guest memory.
9069 */
9070IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9071{
9072 /* The lazy approach for now... */
9073 uint8_t const *pu8Src;
9074 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9075 if (rc == VINF_SUCCESS)
9076 {
9077 *pu8Dst = *pu8Src;
9078 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9079 }
9080 return rc;
9081}
9082
9083
9084#ifdef IEM_WITH_SETJMP
9085/**
9086 * Fetches a data byte, longjmp on error.
9087 *
9088 * @returns The byte.
9089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9090 * @param iSegReg The index of the segment register to use for
9091 * this access. The base and limits are checked.
9092 * @param GCPtrMem The address of the guest memory.
9093 */
9094DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9095{
9096 /* The lazy approach for now... */
9097 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9098 uint8_t const bRet = *pu8Src;
9099 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9100 return bRet;
9101}
9102#endif /* IEM_WITH_SETJMP */
9103
9104
9105/**
9106 * Fetches a data word.
9107 *
9108 * @returns Strict VBox status code.
9109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9110 * @param pu16Dst Where to return the word.
9111 * @param iSegReg The index of the segment register to use for
9112 * this access. The base and limits are checked.
9113 * @param GCPtrMem The address of the guest memory.
9114 */
9115IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9116{
9117 /* The lazy approach for now... */
9118 uint16_t const *pu16Src;
9119 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9120 if (rc == VINF_SUCCESS)
9121 {
9122 *pu16Dst = *pu16Src;
9123 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9124 }
9125 return rc;
9126}
9127
9128
9129#ifdef IEM_WITH_SETJMP
9130/**
9131 * Fetches a data word, longjmp on error.
9132 *
9133 * @returns The word
9134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9135 * @param iSegReg The index of the segment register to use for
9136 * this access. The base and limits are checked.
9137 * @param GCPtrMem The address of the guest memory.
9138 */
9139DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9140{
9141 /* The lazy approach for now... */
9142 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9143 uint16_t const u16Ret = *pu16Src;
9144 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9145 return u16Ret;
9146}
9147#endif
9148
9149
9150/**
9151 * Fetches a data dword.
9152 *
9153 * @returns Strict VBox status code.
9154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9155 * @param pu32Dst Where to return the dword.
9156 * @param iSegReg The index of the segment register to use for
9157 * this access. The base and limits are checked.
9158 * @param GCPtrMem The address of the guest memory.
9159 */
9160IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9161{
9162 /* The lazy approach for now... */
9163 uint32_t const *pu32Src;
9164 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9165 if (rc == VINF_SUCCESS)
9166 {
9167 *pu32Dst = *pu32Src;
9168 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9169 }
9170 return rc;
9171}
9172
9173
9174#ifdef IEM_WITH_SETJMP
9175
9176IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9177{
9178 Assert(cbMem >= 1);
9179 Assert(iSegReg < X86_SREG_COUNT);
9180
9181 /*
9182 * 64-bit mode is simpler.
9183 */
9184 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9185 {
9186 if (iSegReg >= X86_SREG_FS)
9187 {
9188 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9189 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9190 GCPtrMem += pSel->u64Base;
9191 }
9192
9193 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9194 return GCPtrMem;
9195 }
9196 /*
9197 * 16-bit and 32-bit segmentation.
9198 */
9199 else
9200 {
9201 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9202 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9203 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9204 == X86DESCATTR_P /* data, expand up */
9205 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9206 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9207 {
9208 /* expand up */
9209 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9210 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9211 && GCPtrLast32 > (uint32_t)GCPtrMem))
9212 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9213 }
9214 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9215 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9216 {
9217 /* expand down */
9218 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9219 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9220 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9221 && GCPtrLast32 > (uint32_t)GCPtrMem))
9222 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9223 }
9224 else
9225 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9226 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9227 }
9228 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9229}
9230
9231
9232IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9233{
9234 Assert(cbMem >= 1);
9235 Assert(iSegReg < X86_SREG_COUNT);
9236
9237 /*
9238 * 64-bit mode is simpler.
9239 */
9240 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9241 {
9242 if (iSegReg >= X86_SREG_FS)
9243 {
9244 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9245 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9246 GCPtrMem += pSel->u64Base;
9247 }
9248
9249 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9250 return GCPtrMem;
9251 }
9252 /*
9253 * 16-bit and 32-bit segmentation.
9254 */
9255 else
9256 {
9257 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9258 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9259 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9260 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9261 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9262 {
9263 /* expand up */
9264 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9265 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9266 && GCPtrLast32 > (uint32_t)GCPtrMem))
9267 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9268 }
9269 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9270 {
9271 /* expand down */
9272 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9273 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9274 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9275 && GCPtrLast32 > (uint32_t)GCPtrMem))
9276 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9277 }
9278 else
9279 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9280 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9281 }
9282 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9283}
9284
9285
9286/**
9287 * Fetches a data dword, longjmp on error, fallback/safe version.
9288 *
9289 * @returns The dword
9290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9291 * @param iSegReg The index of the segment register to use for
9292 * this access. The base and limits are checked.
9293 * @param GCPtrMem The address of the guest memory.
9294 */
9295IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9296{
9297 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9298 uint32_t const u32Ret = *pu32Src;
9299 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9300 return u32Ret;
9301}
9302
9303
9304/**
9305 * Fetches a data dword, longjmp on error.
9306 *
9307 * @returns The dword
9308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9309 * @param iSegReg The index of the segment register to use for
9310 * this access. The base and limits are checked.
9311 * @param GCPtrMem The address of the guest memory.
9312 */
9313DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9314{
9315# ifdef IEM_WITH_DATA_TLB
9316 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9317 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9318 {
9319 /// @todo more later.
9320 }
9321
9322 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9323# else
9324 /* The lazy approach. */
9325 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9326 uint32_t const u32Ret = *pu32Src;
9327 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9328 return u32Ret;
9329# endif
9330}
9331#endif
9332
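/*
 * Illustrative sketch (not part of IEM): the shape of the page-crossing test
 * used by the TLB fast path above.  An access of cbAccess bytes stays within
 * one 4 KiB page exactly when its page offset is no greater than
 * 4096 - cbAccess.  The constants and names below are examples, not the IEM
 * definitions.
 */
#if 0 /* example only, never built */
# include <stdint.h>
# include <stdbool.h>

# define EXAMPLE_PAGE_SIZE        UINT32_C(0x1000)
# define EXAMPLE_PAGE_OFFSET_MASK (EXAMPLE_PAGE_SIZE - 1)

static bool exampleAccessStaysInPage(uint64_t GCPtrEff, uint32_t cbAccess)
{
    return (GCPtrEff & EXAMPLE_PAGE_OFFSET_MASK) <= EXAMPLE_PAGE_SIZE - cbAccess;
}
#endif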
9333
9334#ifdef SOME_UNUSED_FUNCTION
9335/**
9336 * Fetches a data dword and sign extends it to a qword.
9337 *
9338 * @returns Strict VBox status code.
9339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9340 * @param pu64Dst Where to return the sign extended value.
9341 * @param iSegReg The index of the segment register to use for
9342 * this access. The base and limits are checked.
9343 * @param GCPtrMem The address of the guest memory.
9344 */
9345IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9346{
9347 /* The lazy approach for now... */
9348 int32_t const *pi32Src;
9349 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9350 if (rc == VINF_SUCCESS)
9351 {
9352 *pu64Dst = *pi32Src;
9353 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9354 }
9355#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9356 else
9357 *pu64Dst = 0;
9358#endif
9359 return rc;
9360}
9361#endif
9362
9363
9364/**
9365 * Fetches a data qword.
9366 *
9367 * @returns Strict VBox status code.
9368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9369 * @param pu64Dst Where to return the qword.
9370 * @param iSegReg The index of the segment register to use for
9371 * this access. The base and limits are checked.
9372 * @param GCPtrMem The address of the guest memory.
9373 */
9374IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9375{
9376 /* The lazy approach for now... */
9377 uint64_t const *pu64Src;
9378 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9379 if (rc == VINF_SUCCESS)
9380 {
9381 *pu64Dst = *pu64Src;
9382 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9383 }
9384 return rc;
9385}
9386
9387
9388#ifdef IEM_WITH_SETJMP
9389/**
9390 * Fetches a data qword, longjmp on error.
9391 *
9392 * @returns The qword.
9393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9394 * @param iSegReg The index of the segment register to use for
9395 * this access. The base and limits are checked.
9396 * @param GCPtrMem The address of the guest memory.
9397 */
9398DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9399{
9400 /* The lazy approach for now... */
9401 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9402 uint64_t const u64Ret = *pu64Src;
9403 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9404 return u64Ret;
9405}
9406#endif
9407
9408
9409/**
9410 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9411 *
9412 * @returns Strict VBox status code.
9413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9414 * @param pu64Dst Where to return the qword.
9415 * @param iSegReg The index of the segment register to use for
9416 * this access. The base and limits are checked.
9417 * @param GCPtrMem The address of the guest memory.
9418 */
9419IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9420{
9421 /* The lazy approach for now... */
9422 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9423 if (RT_UNLIKELY(GCPtrMem & 15))
9424 return iemRaiseGeneralProtectionFault0(pVCpu);
9425
9426 uint64_t const *pu64Src;
9427 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9428 if (rc == VINF_SUCCESS)
9429 {
9430 *pu64Dst = *pu64Src;
9431 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9432 }
9433 return rc;
9434}
9435
9436
9437#ifdef IEM_WITH_SETJMP
9438/**
9439 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9440 *
9441 * @returns The qword.
9442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9443 * @param iSegReg The index of the segment register to use for
9444 * this access. The base and limits are checked.
9445 * @param GCPtrMem The address of the guest memory.
9446 */
9447DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9448{
9449 /* The lazy approach for now... */
9450 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9451 if (RT_LIKELY(!(GCPtrMem & 15)))
9452 {
9453 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9454 uint64_t const u64Ret = *pu64Src;
9455 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9456 return u64Ret;
9457 }
9458
9459 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9460 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9461}
9462#endif
9463
9464
9465/**
9466 * Fetches a data tword.
9467 *
9468 * @returns Strict VBox status code.
9469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9470 * @param pr80Dst Where to return the tword.
9471 * @param iSegReg The index of the segment register to use for
9472 * this access. The base and limits are checked.
9473 * @param GCPtrMem The address of the guest memory.
9474 */
9475IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9476{
9477 /* The lazy approach for now... */
9478 PCRTFLOAT80U pr80Src;
9479 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9480 if (rc == VINF_SUCCESS)
9481 {
9482 *pr80Dst = *pr80Src;
9483 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9484 }
9485 return rc;
9486}
9487
9488
9489#ifdef IEM_WITH_SETJMP
9490/**
9491 * Fetches a data tword, longjmp on error.
9492 *
9493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9494 * @param pr80Dst Where to return the tword.
9495 * @param iSegReg The index of the segment register to use for
9496 * this access. The base and limits are checked.
9497 * @param GCPtrMem The address of the guest memory.
9498 */
9499DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9500{
9501 /* The lazy approach for now... */
9502 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9503 *pr80Dst = *pr80Src;
9504 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9505}
9506#endif
9507
9508
9509/**
9510 * Fetches a data dqword (double qword), generally SSE related.
9511 *
9512 * @returns Strict VBox status code.
9513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9514 * @param pu128Dst Where to return the dqword.
9515 * @param iSegReg The index of the segment register to use for
9516 * this access. The base and limits are checked.
9517 * @param GCPtrMem The address of the guest memory.
9518 */
9519IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9520{
9521 /* The lazy approach for now... */
9522 PCRTUINT128U pu128Src;
9523 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9524 if (rc == VINF_SUCCESS)
9525 {
9526 pu128Dst->au64[0] = pu128Src->au64[0];
9527 pu128Dst->au64[1] = pu128Src->au64[1];
9528 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9529 }
9530 return rc;
9531}
9532
9533
9534#ifdef IEM_WITH_SETJMP
9535/**
9536 * Fetches a data dqword (double qword), generally SSE related.
9537 *
9538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9539 * @param pu128Dst Where to return the dqword.
9540 * @param iSegReg The index of the segment register to use for
9541 * this access. The base and limits are checked.
9542 * @param GCPtrMem The address of the guest memory.
9543 */
9544IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9545{
9546 /* The lazy approach for now... */
9547 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9548 pu128Dst->au64[0] = pu128Src->au64[0];
9549 pu128Dst->au64[1] = pu128Src->au64[1];
9550 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9551}
9552#endif
9553
9554
9555/**
9556 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9557 * related.
9558 *
9559 * Raises \#GP(0) if not aligned.
9560 *
9561 * @returns Strict VBox status code.
9562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9563 * @param pu128Dst Where to return the dqword.
9564 * @param iSegReg The index of the segment register to use for
9565 * this access. The base and limits are checked.
9566 * @param GCPtrMem The address of the guest memory.
9567 */
9568IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9569{
9570 /* The lazy approach for now... */
9571 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9572 if ( (GCPtrMem & 15)
9573 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9574 return iemRaiseGeneralProtectionFault0(pVCpu);
9575
9576 PCRTUINT128U pu128Src;
9577 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9578 if (rc == VINF_SUCCESS)
9579 {
9580 pu128Dst->au64[0] = pu128Src->au64[0];
9581 pu128Dst->au64[1] = pu128Src->au64[1];
9582 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9583 }
9584 return rc;
9585}
9586
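/*
 * Illustrative sketch (not part of IEM): the alignment decision made above.
 * A 16-byte SSE access normally requires a 16-byte aligned address and
 * raises #GP(0) otherwise; when MXCSR.MM (the AMD misaligned exception mask,
 * bit 17) is set the misaligned access is tolerated instead.  The names and
 * the constant below are spelled out for the example rather than taken from
 * the IEM headers.
 */
#if 0 /* example only, never built */
# include <stdint.h>
# include <stdbool.h>

# define EXAMPLE_MXCSR_MM UINT32_C(0x00020000) /* bit 17 */

static bool exampleMisalignedSseAccessFaults(uint64_t GCPtrMem, uint32_t fMxcsr)
{
    return (GCPtrMem & 15) != 0            /* not 16-byte aligned */
        && !(fMxcsr & EXAMPLE_MXCSR_MM);   /* and misaligned accesses not masked */
}
#endif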
9587
9588#ifdef IEM_WITH_SETJMP
9589/**
9590 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9591 * related, longjmp on error.
9592 *
9593 * Raises \#GP(0) if not aligned.
9594 *
9595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9596 * @param pu128Dst Where to return the dqword.
9597 * @param iSegReg The index of the segment register to use for
9598 * this access. The base and limits are checked.
9599 * @param GCPtrMem The address of the guest memory.
9600 */
9601DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9602{
9603 /* The lazy approach for now... */
9604 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9605 if ( (GCPtrMem & 15) == 0
9606 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9607 {
9608 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9609 pu128Dst->au64[0] = pu128Src->au64[0];
9610 pu128Dst->au64[1] = pu128Src->au64[1];
9611 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9612 return;
9613 }
9614
9615 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9616 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9617}
9618#endif
9619
9620
9621/**
9622 * Fetches a data oword (octo word), generally AVX related.
9623 *
9624 * @returns Strict VBox status code.
9625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9626 * @param pu256Dst Where to return the oword.
9627 * @param iSegReg The index of the segment register to use for
9628 * this access. The base and limits are checked.
9629 * @param GCPtrMem The address of the guest memory.
9630 */
9631IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9632{
9633 /* The lazy approach for now... */
9634 PCRTUINT256U pu256Src;
9635 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9636 if (rc == VINF_SUCCESS)
9637 {
9638 pu256Dst->au64[0] = pu256Src->au64[0];
9639 pu256Dst->au64[1] = pu256Src->au64[1];
9640 pu256Dst->au64[2] = pu256Src->au64[2];
9641 pu256Dst->au64[3] = pu256Src->au64[3];
9642 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9643 }
9644 return rc;
9645}
9646
9647
9648#ifdef IEM_WITH_SETJMP
9649/**
9650 * Fetches a data oword (octo word), generally AVX related.
9651 *
9652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9653 * @param pu256Dst Where to return the oword.
9654 * @param iSegReg The index of the segment register to use for
9655 * this access. The base and limits are checked.
9656 * @param GCPtrMem The address of the guest memory.
9657 */
9658IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9659{
9660 /* The lazy approach for now... */
9661 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9662 pu256Dst->au64[0] = pu256Src->au64[0];
9663 pu256Dst->au64[1] = pu256Src->au64[1];
9664 pu256Dst->au64[2] = pu256Src->au64[2];
9665 pu256Dst->au64[3] = pu256Src->au64[3];
9666 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9667}
9668#endif
9669
9670
9671/**
9672 * Fetches a data oword (octo word) at an aligned address, generally AVX
9673 * related.
9674 *
9675 * Raises \#GP(0) if not aligned.
9676 *
9677 * @returns Strict VBox status code.
9678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9679 * @param pu256Dst Where to return the oword.
9680 * @param iSegReg The index of the segment register to use for
9681 * this access. The base and limits are checked.
9682 * @param GCPtrMem The address of the guest memory.
9683 */
9684IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9685{
9686 /* The lazy approach for now... */
9687 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9688 if (GCPtrMem & 31)
9689 return iemRaiseGeneralProtectionFault0(pVCpu);
9690
9691 PCRTUINT256U pu256Src;
9692 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9693 if (rc == VINF_SUCCESS)
9694 {
9695 pu256Dst->au64[0] = pu256Src->au64[0];
9696 pu256Dst->au64[1] = pu256Src->au64[1];
9697 pu256Dst->au64[2] = pu256Src->au64[2];
9698 pu256Dst->au64[3] = pu256Src->au64[3];
9699 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9700 }
9701 return rc;
9702}
9703
9704
9705#ifdef IEM_WITH_SETJMP
9706/**
9707 * Fetches a data oword (octo word) at an aligned address, generally AVX
9708 * related, longjmp on error.
9709 *
9710 * Raises \#GP(0) if not aligned.
9711 *
9712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9713 * @param pu256Dst Where to return the oword.
9714 * @param iSegReg The index of the segment register to use for
9715 * this access. The base and limits are checked.
9716 * @param GCPtrMem The address of the guest memory.
9717 */
9718DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9719{
9720 /* The lazy approach for now... */
9721 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9722 if ((GCPtrMem & 31) == 0)
9723 {
9724 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9725 pu256Dst->au64[0] = pu256Src->au64[0];
9726 pu256Dst->au64[1] = pu256Src->au64[1];
9727 pu256Dst->au64[2] = pu256Src->au64[2];
9728 pu256Dst->au64[3] = pu256Src->au64[3];
9729 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9730 return;
9731 }
9732
9733 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9734 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9735}
9736#endif
9737
9738
9739
9740/**
9741 * Fetches a descriptor register (lgdt, lidt).
9742 *
9743 * @returns Strict VBox status code.
9744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9745 * @param pcbLimit Where to return the limit.
9746 * @param pGCPtrBase Where to return the base.
9747 * @param iSegReg The index of the segment register to use for
9748 * this access. The base and limits are checked.
9749 * @param GCPtrMem The address of the guest memory.
9750 * @param enmOpSize The effective operand size.
9751 */
9752IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9753 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9754{
9755 /*
9756 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9757 * little special:
9758 * - The two reads are done separately.
9759 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9760 * - We suspect the 386 to actually commit the limit before the base in
9761 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9762 * don't try to emulate this eccentric behavior, because it's not well
9763 * enough understood and rather hard to trigger.
9764 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9765 */
9766 VBOXSTRICTRC rcStrict;
9767 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9768 {
9769 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9770 if (rcStrict == VINF_SUCCESS)
9771 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9772 }
9773 else
9774 {
9775 uint32_t uTmp = 0; /* (Visual C++ may otherwise warn about uninitialized use.) */
9776 if (enmOpSize == IEMMODE_32BIT)
9777 {
9778 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9779 {
9780 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9781 if (rcStrict == VINF_SUCCESS)
9782 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9783 }
9784 else
9785 {
9786 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9787 if (rcStrict == VINF_SUCCESS)
9788 {
9789 *pcbLimit = (uint16_t)uTmp;
9790 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9791 }
9792 }
9793 if (rcStrict == VINF_SUCCESS)
9794 *pGCPtrBase = uTmp;
9795 }
9796 else
9797 {
9798 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9799 if (rcStrict == VINF_SUCCESS)
9800 {
9801 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9802 if (rcStrict == VINF_SUCCESS)
9803 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9804 }
9805 }
9806 }
9807 return rcStrict;
9808}
9809
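/*
 * Illustrative sketch (not part of IEM): the memory layout read above.  The
 * LGDT/LIDT operand is a 16-bit limit followed by the base; with a 16-bit
 * operand size only the low 24 bits of the base are used, with a 32-bit
 * operand size all 32 bits are used, and in 64-bit mode the base is 64 bits.
 * The helper below decodes such an image from a plain byte buffer on a
 * little-endian host; all names are invented for the example.
 */
#if 0 /* example only, never built */
# include <stdint.h>
# include <stdbool.h>
# include <string.h>

static void exampleDecodeXdtr(const uint8_t *pbOperand, bool f32BitOpSize, bool f64BitMode,
                              uint16_t *pcbLimit, uint64_t *puBase)
{
    memcpy(pcbLimit, pbOperand, sizeof(*pcbLimit));           /* bytes 0-1: limit */
    uint64_t uBase = 0;
    if (f64BitMode)
        memcpy(&uBase, pbOperand + 2, sizeof(uint64_t));      /* bytes 2-9: 64-bit base */
    else
    {
        uint32_t uBase32;
        memcpy(&uBase32, pbOperand + 2, sizeof(uBase32));     /* bytes 2-5: 32-bit base */
        uBase = f32BitOpSize ? uBase32 : (uBase32 & UINT32_C(0x00ffffff)); /* 16-bit opsize: 24-bit base */
    }
    *puBase = uBase;
}
#endif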
9810
9811
9812/**
9813 * Stores a data byte.
9814 *
9815 * @returns Strict VBox status code.
9816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9817 * @param iSegReg The index of the segment register to use for
9818 * this access. The base and limits are checked.
9819 * @param GCPtrMem The address of the guest memory.
9820 * @param u8Value The value to store.
9821 */
9822IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9823{
9824 /* The lazy approach for now... */
9825 uint8_t *pu8Dst;
9826 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9827 if (rc == VINF_SUCCESS)
9828 {
9829 *pu8Dst = u8Value;
9830 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9831 }
9832 return rc;
9833}
9834
9835
9836#ifdef IEM_WITH_SETJMP
9837/**
9838 * Stores a data byte, longjmp on error.
9839 *
9840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9841 * @param iSegReg The index of the segment register to use for
9842 * this access. The base and limits are checked.
9843 * @param GCPtrMem The address of the guest memory.
9844 * @param u8Value The value to store.
9845 */
9846IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9847{
9848 /* The lazy approach for now... */
9849 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9850 *pu8Dst = u8Value;
9851 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9852}
9853#endif
9854
9855
9856/**
9857 * Stores a data word.
9858 *
9859 * @returns Strict VBox status code.
9860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9861 * @param iSegReg The index of the segment register to use for
9862 * this access. The base and limits are checked.
9863 * @param GCPtrMem The address of the guest memory.
9864 * @param u16Value The value to store.
9865 */
9866IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9867{
9868 /* The lazy approach for now... */
9869 uint16_t *pu16Dst;
9870 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9871 if (rc == VINF_SUCCESS)
9872 {
9873 *pu16Dst = u16Value;
9874 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9875 }
9876 return rc;
9877}
9878
9879
9880#ifdef IEM_WITH_SETJMP
9881/**
9882 * Stores a data word, longjmp on error.
9883 *
9884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9885 * @param iSegReg The index of the segment register to use for
9886 * this access. The base and limits are checked.
9887 * @param GCPtrMem The address of the guest memory.
9888 * @param u16Value The value to store.
9889 */
9890IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9891{
9892 /* The lazy approach for now... */
9893 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9894 *pu16Dst = u16Value;
9895 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9896}
9897#endif
9898
9899
9900/**
9901 * Stores a data dword.
9902 *
9903 * @returns Strict VBox status code.
9904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9905 * @param iSegReg The index of the segment register to use for
9906 * this access. The base and limits are checked.
9907 * @param GCPtrMem The address of the guest memory.
9908 * @param u32Value The value to store.
9909 */
9910IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9911{
9912 /* The lazy approach for now... */
9913 uint32_t *pu32Dst;
9914 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9915 if (rc == VINF_SUCCESS)
9916 {
9917 *pu32Dst = u32Value;
9918 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9919 }
9920 return rc;
9921}
9922
9923
9924#ifdef IEM_WITH_SETJMP
9925/**
9926 * Stores a data dword, longjmp on error.
9927 *
9929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9930 * @param iSegReg The index of the segment register to use for
9931 * this access. The base and limits are checked.
9932 * @param GCPtrMem The address of the guest memory.
9933 * @param u32Value The value to store.
9934 */
9935IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9936{
9937 /* The lazy approach for now... */
9938 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9939 *pu32Dst = u32Value;
9940 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9941}
9942#endif
9943
9944
9945/**
9946 * Stores a data qword.
9947 *
9948 * @returns Strict VBox status code.
9949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9950 * @param iSegReg The index of the segment register to use for
9951 * this access. The base and limits are checked.
9952 * @param GCPtrMem The address of the guest memory.
9953 * @param u64Value The value to store.
9954 */
9955IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9956{
9957 /* The lazy approach for now... */
9958 uint64_t *pu64Dst;
9959 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9960 if (rc == VINF_SUCCESS)
9961 {
9962 *pu64Dst = u64Value;
9963 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9964 }
9965 return rc;
9966}
9967
9968
9969#ifdef IEM_WITH_SETJMP
9970/**
9971 * Stores a data qword, longjmp on error.
9972 *
9973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9974 * @param iSegReg The index of the segment register to use for
9975 * this access. The base and limits are checked.
9976 * @param GCPtrMem The address of the guest memory.
9977 * @param u64Value The value to store.
9978 */
9979IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9980{
9981 /* The lazy approach for now... */
9982 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9983 *pu64Dst = u64Value;
9984 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9985}
9986#endif
9987
9988
9989/**
9990 * Stores a data dqword.
9991 *
9992 * @returns Strict VBox status code.
9993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9994 * @param iSegReg The index of the segment register to use for
9995 * this access. The base and limits are checked.
9996 * @param GCPtrMem The address of the guest memory.
9997 * @param u128Value The value to store.
9998 */
9999IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10000{
10001 /* The lazy approach for now... */
10002 PRTUINT128U pu128Dst;
10003 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10004 if (rc == VINF_SUCCESS)
10005 {
10006 pu128Dst->au64[0] = u128Value.au64[0];
10007 pu128Dst->au64[1] = u128Value.au64[1];
10008 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10009 }
10010 return rc;
10011}
10012
10013
10014#ifdef IEM_WITH_SETJMP
10015/**
10016 * Stores a data dqword, longjmp on error.
10017 *
10018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10019 * @param iSegReg The index of the segment register to use for
10020 * this access. The base and limits are checked.
10021 * @param GCPtrMem The address of the guest memory.
10022 * @param u128Value The value to store.
10023 */
10024IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10025{
10026 /* The lazy approach for now... */
10027 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10028 pu128Dst->au64[0] = u128Value.au64[0];
10029 pu128Dst->au64[1] = u128Value.au64[1];
10030 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10031}
10032#endif
10033
10034
10035/**
10036 * Stores a data dqword, SSE aligned.
10037 *
10038 * @returns Strict VBox status code.
10039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10040 * @param iSegReg The index of the segment register to use for
10041 * this access. The base and limits are checked.
10042 * @param GCPtrMem The address of the guest memory.
10043 * @param u128Value The value to store.
10044 */
10045IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10046{
10047 /* The lazy approach for now... */
10048 if ( (GCPtrMem & 15)
10049 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10050 return iemRaiseGeneralProtectionFault0(pVCpu);
10051
10052 PRTUINT128U pu128Dst;
10053 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10054 if (rc == VINF_SUCCESS)
10055 {
10056 pu128Dst->au64[0] = u128Value.au64[0];
10057 pu128Dst->au64[1] = u128Value.au64[1];
10058 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10059 }
10060 return rc;
10061}
10062
10063
10064#ifdef IEM_WITH_SETJMP
10065/**
10066 * Stores a data dqword, SSE aligned, longjmp on error.
10067 *
10069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10070 * @param iSegReg The index of the segment register to use for
10071 * this access. The base and limits are checked.
10072 * @param GCPtrMem The address of the guest memory.
10073 * @param u128Value The value to store.
10074 */
10075DECL_NO_INLINE(IEM_STATIC, void)
10076iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10077{
10078 /* The lazy approach for now... */
10079 if ( (GCPtrMem & 15) == 0
10080 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10081 {
10082 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10083 pu128Dst->au64[0] = u128Value.au64[0];
10084 pu128Dst->au64[1] = u128Value.au64[1];
10085 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10086 return;
10087 }
10088
10089 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10090 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10091}
10092#endif
10093
10094
10095/**
10096 * Stores a data oword (octo word).
10097 *
10098 * @returns Strict VBox status code.
10099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10100 * @param iSegReg The index of the segment register to use for
10101 * this access. The base and limits are checked.
10102 * @param GCPtrMem The address of the guest memory.
10103 * @param pu256Value Pointer to the value to store.
10104 */
10105IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10106{
10107 /* The lazy approach for now... */
10108 PRTUINT256U pu256Dst;
10109 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10110 if (rc == VINF_SUCCESS)
10111 {
10112 pu256Dst->au64[0] = pu256Value->au64[0];
10113 pu256Dst->au64[1] = pu256Value->au64[1];
10114 pu256Dst->au64[2] = pu256Value->au64[2];
10115 pu256Dst->au64[3] = pu256Value->au64[3];
10116 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10117 }
10118 return rc;
10119}
10120
10121
10122#ifdef IEM_WITH_SETJMP
10123/**
10124 * Stores a data oword (octo word), longjmp on error.
10125 *
10126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10127 * @param iSegReg The index of the segment register to use for
10128 * this access. The base and limits are checked.
10129 * @param GCPtrMem The address of the guest memory.
10130 * @param pu256Value Pointer to the value to store.
10131 */
10132IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10133{
10134 /* The lazy approach for now... */
10135 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10136 pu256Dst->au64[0] = pu256Value->au64[0];
10137 pu256Dst->au64[1] = pu256Value->au64[1];
10138 pu256Dst->au64[2] = pu256Value->au64[2];
10139 pu256Dst->au64[3] = pu256Value->au64[3];
10140 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10141}
10142#endif
10143
10144
10145/**
10146 * Stores a data oword (octo word), AVX aligned.
10147 *
10148 * @returns Strict VBox status code.
10149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10150 * @param iSegReg The index of the segment register to use for
10151 * this access. The base and limits are checked.
10152 * @param GCPtrMem The address of the guest memory.
10153 * @param pu256Value Pointer to the value to store.
10154 */
10155IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10156{
10157 /* The lazy approach for now... */
10158 if (GCPtrMem & 31)
10159 return iemRaiseGeneralProtectionFault0(pVCpu);
10160
10161 PRTUINT256U pu256Dst;
10162 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10163 if (rc == VINF_SUCCESS)
10164 {
10165 pu256Dst->au64[0] = pu256Value->au64[0];
10166 pu256Dst->au64[1] = pu256Value->au64[1];
10167 pu256Dst->au64[2] = pu256Value->au64[2];
10168 pu256Dst->au64[3] = pu256Value->au64[3];
10169 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10170 }
10171 return rc;
10172}
10173
10174
10175#ifdef IEM_WITH_SETJMP
10176/**
10177 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10178 *
10180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10181 * @param iSegReg The index of the segment register to use for
10182 * this access. The base and limits are checked.
10183 * @param GCPtrMem The address of the guest memory.
10184 * @param pu256Value Pointer to the value to store.
10185 */
10186DECL_NO_INLINE(IEM_STATIC, void)
10187iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10188{
10189 /* The lazy approach for now... */
10190 if ((GCPtrMem & 31) == 0)
10191 {
10192 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10193 pu256Dst->au64[0] = pu256Value->au64[0];
10194 pu256Dst->au64[1] = pu256Value->au64[1];
10195 pu256Dst->au64[2] = pu256Value->au64[2];
10196 pu256Dst->au64[3] = pu256Value->au64[3];
10197 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10198 return;
10199 }
10200
10201 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10202 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10203}
10204#endif
10205
10206
10207/**
10208 * Stores a descriptor register (sgdt, sidt).
10209 *
10210 * @returns Strict VBox status code.
10211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10212 * @param cbLimit The limit.
10213 * @param GCPtrBase The base address.
10214 * @param iSegReg The index of the segment register to use for
10215 * this access. The base and limits are checked.
10216 * @param GCPtrMem The address of the guest memory.
10217 */
10218IEM_STATIC VBOXSTRICTRC
10219iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10220{
10221 /*
10222 * The SIDT and SGDT instructions actually store the data using two
10223 * independent writes. The instructions do not respond to opsize prefixes.
10224 */
10225 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10226 if (rcStrict == VINF_SUCCESS)
10227 {
10228 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10229 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10230 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10231 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10232 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10233 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10234 else
10235 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10236 }
10237 return rcStrict;
10238}
10239
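/*
 * Illustrative sketch (not part of IEM): the memory image produced above.
 * SGDT/SIDT store the 16-bit limit at offset 0 and the base at offset 2; in
 * 64-bit mode the base is 8 bytes, otherwise 4 bytes, and a 286-style target
 * forces the top byte of the 32-bit base to 0xFF as done above.  Everything
 * below is an invented, little-endian-host example.
 */
#if 0 /* example only, never built */
# include <stdint.h>
# include <stdbool.h>
# include <string.h>

static void exampleEncodeXdtr(uint8_t *pbDst, uint16_t cbLimit, uint64_t uBase,
                              bool f64BitMode, bool f286Style)
{
    memcpy(pbDst, &cbLimit, sizeof(cbLimit));                 /* bytes 0-1: limit */
    if (f64BitMode)
        memcpy(pbDst + 2, &uBase, sizeof(uint64_t));          /* bytes 2-9: 64-bit base */
    else
    {
        uint32_t uBase32 = (uint32_t)uBase;
        if (f286Style)
            uBase32 |= UINT32_C(0xff000000);                  /* 286 target: top byte stored as FF */
        memcpy(pbDst + 2, &uBase32, sizeof(uBase32));         /* bytes 2-5: 32-bit base */
    }
}
#endif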
10240
10241/**
10242 * Pushes a word onto the stack.
10243 *
10244 * @returns Strict VBox status code.
10245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10246 * @param u16Value The value to push.
10247 */
10248IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10249{
10250 /* Decrement the stack pointer. */
10251 uint64_t uNewRsp;
10252 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10253
10254 /* Write the word the lazy way. */
10255 uint16_t *pu16Dst;
10256 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10257 if (rc == VINF_SUCCESS)
10258 {
10259 *pu16Dst = u16Value;
10260 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10261 }
10262
10263 /* Commit the new RSP value unless an access handler made trouble. */
10264 if (rc == VINF_SUCCESS)
10265 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10266
10267 return rc;
10268}
10269
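/*
 * Illustrative sketch (not part of IEM): the ordering used by the push
 * helpers above.  The new stack pointer is computed first, the value is
 * written at that address, and the register is only committed once the
 * write succeeded, so a faulting push leaves RSP untouched.  The callback
 * type and names are invented; the real helpers additionally go through SS
 * segmentation and bounce buffering.
 */
#if 0 /* example only, never built */
# include <stdint.h>

typedef int (*EXAMPLEWRITEFN)(uint64_t uAddr, const void *pvValue, uint32_t cbValue);

static int examplePushU16(uint64_t *puRsp, uint16_t uValue, EXAMPLEWRITEFN pfnWrite)
{
    uint64_t const uNewRsp = *puRsp - sizeof(uValue);         /* pushes grow the stack downwards */
    int rc = pfnWrite(uNewRsp, &uValue, sizeof(uValue));      /* may fail (e.g. #PF, #SS) */
    if (rc == 0)
        *puRsp = uNewRsp;                                     /* commit only on success */
    return rc;
}
#endif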
10270
10271/**
10272 * Pushes a dword onto the stack.
10273 *
10274 * @returns Strict VBox status code.
10275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10276 * @param u32Value The value to push.
10277 */
10278IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10279{
10280 /* Decrement the stack pointer. */
10281 uint64_t uNewRsp;
10282 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10283
10284 /* Write the dword the lazy way. */
10285 uint32_t *pu32Dst;
10286 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10287 if (rc == VINF_SUCCESS)
10288 {
10289 *pu32Dst = u32Value;
10290 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10291 }
10292
10293 /* Commit the new RSP value unless an access handler made trouble. */
10294 if (rc == VINF_SUCCESS)
10295 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10296
10297 return rc;
10298}
10299
10300
10301/**
10302 * Pushes a dword segment register value onto the stack.
10303 *
10304 * @returns Strict VBox status code.
10305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10306 * @param u32Value The value to push.
10307 */
10308IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10309{
10310 /* Decrement the stack pointer. */
10311 uint64_t uNewRsp;
10312 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10313
10314 /* The Intel docs talk about zero extending the selector register
10315 value. My actual Intel CPU here might be zero extending the value,
10316 but it still only writes the lower word... */
10317 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10318 * happens when crossing an electric page boundary: is the high word checked
10319 * for write accessibility or not? Probably it is. What about segment limits?
10320 * It appears this behavior is also shared with trap error codes.
10321 *
10322 * Docs indicate the behavior maybe changed with the Pentium or Pentium Pro. Check
10323 * ancient hardware to pin down when it actually did change. */
10324 uint16_t *pu16Dst;
10325 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10326 if (rc == VINF_SUCCESS)
10327 {
10328 *pu16Dst = (uint16_t)u32Value;
10329 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10330 }
10331
10332 /* Commit the new RSP value unless an access handler made trouble. */
10333 if (rc == VINF_SUCCESS)
10334 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10335
10336 return rc;
10337}
10338
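/*
 * Illustrative sketch (not part of IEM): the observed effect noted in the
 * comment above.  A hypothetical dword stack slot keeps its upper half when
 * only the low word of the selector is written into it (little-endian host
 * assumed).  The buffer and names are made up for the example.
 */
#if 0 /* example only, never built */
# include <stdint.h>
# include <string.h>

static uint32_t exampleSRegPushSlot(uint32_t uOldSlotContents, uint16_t uSel)
{
    uint32_t uSlot = uOldSlotContents;        /* dword slot reserved on the stack */
    memcpy(&uSlot, &uSel, sizeof(uSel));      /* only the low word is overwritten */
    return uSlot;                             /* high word still holds the old bytes */
}
#endif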
10339
10340/**
10341 * Pushes a qword onto the stack.
10342 *
10343 * @returns Strict VBox status code.
10344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10345 * @param u64Value The value to push.
10346 */
10347IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10348{
10349 /* Decrement the stack pointer. */
10350 uint64_t uNewRsp;
10351 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10352
10353 /* Write the qword the lazy way. */
10354 uint64_t *pu64Dst;
10355 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10356 if (rc == VINF_SUCCESS)
10357 {
10358 *pu64Dst = u64Value;
10359 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10360 }
10361
10362 /* Commit the new RSP value unless an access handler made trouble. */
10363 if (rc == VINF_SUCCESS)
10364 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10365
10366 return rc;
10367}
10368
10369
10370/**
10371 * Pops a word from the stack.
10372 *
10373 * @returns Strict VBox status code.
10374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10375 * @param pu16Value Where to store the popped value.
10376 */
10377IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10378{
10379 /* Increment the stack pointer. */
10380 uint64_t uNewRsp;
10381 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10382
10383 /* Fetch the word the lazy way. */
10384 uint16_t const *pu16Src;
10385 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10386 if (rc == VINF_SUCCESS)
10387 {
10388 *pu16Value = *pu16Src;
10389 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10390
10391 /* Commit the new RSP value. */
10392 if (rc == VINF_SUCCESS)
10393 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10394 }
10395
10396 return rc;
10397}
10398
10399
10400/**
10401 * Pops a dword from the stack.
10402 *
10403 * @returns Strict VBox status code.
10404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10405 * @param pu32Value Where to store the popped value.
10406 */
10407IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10408{
10409 /* Increment the stack pointer. */
10410 uint64_t uNewRsp;
10411 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10412
10413 /* Fetch the dword the lazy way. */
10414 uint32_t const *pu32Src;
10415 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10416 if (rc == VINF_SUCCESS)
10417 {
10418 *pu32Value = *pu32Src;
10419 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10420
10421 /* Commit the new RSP value. */
10422 if (rc == VINF_SUCCESS)
10423 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10424 }
10425
10426 return rc;
10427}
10428
10429
10430/**
10431 * Pops a qword from the stack.
10432 *
10433 * @returns Strict VBox status code.
10434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10435 * @param pu64Value Where to store the popped value.
10436 */
10437IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10438{
10439 /* Increment the stack pointer. */
10440 uint64_t uNewRsp;
10441 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10442
10443 /* Fetch the qword the lazy way. */
10444 uint64_t const *pu64Src;
10445 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10446 if (rc == VINF_SUCCESS)
10447 {
10448 *pu64Value = *pu64Src;
10449 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10450
10451 /* Commit the new RSP value. */
10452 if (rc == VINF_SUCCESS)
10453 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10454 }
10455
10456 return rc;
10457}
10458
10459
10460/**
10461 * Pushes a word onto the stack, using a temporary stack pointer.
10462 *
10463 * @returns Strict VBox status code.
10464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10465 * @param u16Value The value to push.
10466 * @param pTmpRsp Pointer to the temporary stack pointer.
10467 */
10468IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10469{
10470 /* Decrement the stack pointer. */
10471 RTUINT64U NewRsp = *pTmpRsp;
10472 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10473
10474 /* Write the word the lazy way. */
10475 uint16_t *pu16Dst;
10476 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10477 if (rc == VINF_SUCCESS)
10478 {
10479 *pu16Dst = u16Value;
10480 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10481 }
10482
10483 /* Commit the new RSP value unless an access handler made trouble. */
10484 if (rc == VINF_SUCCESS)
10485 *pTmpRsp = NewRsp;
10486
10487 return rc;
10488}
10489
10490
10491/**
10492 * Pushes a dword onto the stack, using a temporary stack pointer.
10493 *
10494 * @returns Strict VBox status code.
10495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10496 * @param u32Value The value to push.
10497 * @param pTmpRsp Pointer to the temporary stack pointer.
10498 */
10499IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10500{
10501 /* Decrement the stack pointer. */
10502 RTUINT64U NewRsp = *pTmpRsp;
10503 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10504
10505 /* Write the dword the lazy way. */
10506 uint32_t *pu32Dst;
10507 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10508 if (rc == VINF_SUCCESS)
10509 {
10510 *pu32Dst = u32Value;
10511 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10512 }
10513
10514 /* Commit the new RSP value unless an access handler made trouble. */
10515 if (rc == VINF_SUCCESS)
10516 *pTmpRsp = NewRsp;
10517
10518 return rc;
10519}
10520
10521
10522/**
10523 * Pushes a qword onto the stack, using a temporary stack pointer.
10524 *
10525 * @returns Strict VBox status code.
10526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10527 * @param u64Value The value to push.
10528 * @param pTmpRsp Pointer to the temporary stack pointer.
10529 */
10530IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10531{
10532 /* Decrement the stack pointer. */
10533 RTUINT64U NewRsp = *pTmpRsp;
10534 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10535
10536 /* Write the qword the lazy way. */
10537 uint64_t *pu64Dst;
10538 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10539 if (rc == VINF_SUCCESS)
10540 {
10541 *pu64Dst = u64Value;
10542 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10543 }
10544
10545 /* Commit the new RSP value unless an access handler made trouble. */
10546 if (rc == VINF_SUCCESS)
10547 *pTmpRsp = NewRsp;
10548
10549 return rc;
10550}
10551
10552
10553/**
10554 * Pops a word from the stack, using a temporary stack pointer.
10555 *
10556 * @returns Strict VBox status code.
10557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10558 * @param pu16Value Where to store the popped value.
10559 * @param pTmpRsp Pointer to the temporary stack pointer.
10560 */
10561IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10562{
10563 /* Increment the stack pointer. */
10564 RTUINT64U NewRsp = *pTmpRsp;
10565 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10566
10567 /* Fetch the word the lazy way. */
10568 uint16_t const *pu16Src;
10569 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10570 if (rc == VINF_SUCCESS)
10571 {
10572 *pu16Value = *pu16Src;
10573 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10574
10575 /* Commit the new RSP value. */
10576 if (rc == VINF_SUCCESS)
10577 *pTmpRsp = NewRsp;
10578 }
10579
10580 return rc;
10581}
10582
10583
10584/**
10585 * Pops a dword from the stack, using a temporary stack pointer.
10586 *
10587 * @returns Strict VBox status code.
10588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10589 * @param pu32Value Where to store the popped value.
10590 * @param pTmpRsp Pointer to the temporary stack pointer.
10591 */
10592IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10593{
10594 /* Increment the stack pointer. */
10595 RTUINT64U NewRsp = *pTmpRsp;
10596 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10597
10598 /* Fetch the dword the lazy way. */
10599 uint32_t const *pu32Src;
10600 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10601 if (rc == VINF_SUCCESS)
10602 {
10603 *pu32Value = *pu32Src;
10604 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10605
10606 /* Commit the new RSP value. */
10607 if (rc == VINF_SUCCESS)
10608 *pTmpRsp = NewRsp;
10609 }
10610
10611 return rc;
10612}
10613
10614
10615/**
10616 * Pops a qword from the stack, using a temporary stack pointer.
10617 *
10618 * @returns Strict VBox status code.
10619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10620 * @param pu64Value Where to store the popped value.
10621 * @param pTmpRsp Pointer to the temporary stack pointer.
10622 */
10623IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10624{
10625 /* Increment the stack pointer. */
10626 RTUINT64U NewRsp = *pTmpRsp;
10627 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10628
10629 /* Fetch the qword the lazy way. */
10630 uint64_t const *pu64Src;
10631 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10632 if (rcStrict == VINF_SUCCESS)
10633 {
10634 *pu64Value = *pu64Src;
10635 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10636
10637 /* Commit the new RSP value. */
10638 if (rcStrict == VINF_SUCCESS)
10639 *pTmpRsp = NewRsp;
10640 }
10641
10642 return rcStrict;
10643}
10644
10645
10646/**
10647 * Begin a special stack push (used by interrupts, exceptions and such).
10648 *
10649 * This will raise \#SS or \#PF if appropriate.
10650 *
10651 * @returns Strict VBox status code.
10652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10653 * @param cbMem The number of bytes to push onto the stack.
10654 * @param ppvMem Where to return the pointer to the stack memory.
10655 * As with the other memory functions this could be
10656 * direct access or bounce buffered access, so
10657 * don't commit the register until the commit call
10658 * succeeds.
10659 * @param puNewRsp Where to return the new RSP value. This must be
10660 * passed unchanged to
10661 * iemMemStackPushCommitSpecial().
10662 */
10663IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10664{
10665 Assert(cbMem < UINT8_MAX);
10666 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10667 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10668}
10669
10670
10671/**
10672 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10673 *
10674 * This will update the rSP.
10675 *
10676 * @returns Strict VBox status code.
10677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10678 * @param pvMem The pointer returned by
10679 * iemMemStackPushBeginSpecial().
10680 * @param uNewRsp The new RSP value returned by
10681 * iemMemStackPushBeginSpecial().
10682 */
10683IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10684{
10685 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10686 if (rcStrict == VINF_SUCCESS)
10687 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10688 return rcStrict;
10689}
10690
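/*
 * Illustrative sketch (not part of IEM): the intended Begin/Commit call
 * sequence for the special stack push helpers above.  The frame layout (a
 * single error-code word) and the helper name are invented; error handling
 * is reduced to a bare status check.
 */
#if 0 /* example only, never built */
static VBOXSTRICTRC exampleSpecialPushFrame(PVMCPU pVCpu, uint16_t uErrCd)
{
    void     *pvFrame;
    uint64_t  uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint16_t), &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                                          /* #SS/#PF status propagated */
    *(uint16_t *)pvFrame = uErrCd;                                /* fill the mapped (or bounce) buffer */
    return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp); /* unmap and commit RSP */
}
#endif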
10691
10692/**
10693 * Begin a special stack pop (used by iret, retf and such).
10694 *
10695 * This will raise \#SS or \#PF if appropriate.
10696 *
10697 * @returns Strict VBox status code.
10698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10699 * @param cbMem The number of bytes to pop from the stack.
10700 * @param ppvMem Where to return the pointer to the stack memory.
10701 * @param puNewRsp Where to return the new RSP value. This must be
10702 * assigned to CPUMCTX::rsp manually some time
10703 * after iemMemStackPopDoneSpecial() has been
10704 * called.
10705 */
10706IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10707{
10708 Assert(cbMem < UINT8_MAX);
10709 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10710 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10711}
10712
10713
10714/**
10715 * Continue a special stack pop (used by iret and retf).
10716 *
10717 * This will raise \#SS or \#PF if appropriate.
10718 *
10719 * @returns Strict VBox status code.
10720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10721 * @param cbMem The number of bytes to pop from the stack.
10722 * @param ppvMem Where to return the pointer to the stack memory.
10723 * @param puNewRsp Where to return the new RSP value. This must be
10724 * assigned to CPUMCTX::rsp manually some time
10725 * after iemMemStackPopDoneSpecial() has been
10726 * called.
10727 */
10728IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10729{
10730 Assert(cbMem < UINT8_MAX);
10731 RTUINT64U NewRsp;
10732 NewRsp.u = *puNewRsp;
10733 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10734 *puNewRsp = NewRsp.u;
10735 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10736}
10737
10738
10739/**
10740 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10741 * iemMemStackPopContinueSpecial).
10742 *
10743 * The caller will manually commit the rSP.
10744 *
10745 * @returns Strict VBox status code.
10746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10747 * @param pvMem The pointer returned by
10748 * iemMemStackPopBeginSpecial() or
10749 * iemMemStackPopContinueSpecial().
10750 */
10751IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10752{
10753 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10754}
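
/*
 * A rough usage sketch of the special pop sequence (illustrative only; real
 * callers like iret/retf interleave additional checks):
 *
 *      uint64_t const *pu64Src;
 *      uint64_t        uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint64_t), (void const **)&pu64Src, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint64_t const uValue = *pu64Src;                    // consume the popped value
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Src);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp;                    // the manual RSP commit mentioned above
 */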
10755
10756
10757/**
10758 * Fetches a system table byte.
10759 *
10760 * @returns Strict VBox status code.
10761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10762 * @param pbDst Where to return the byte.
10763 * @param iSegReg The index of the segment register to use for
10764 * this access. The base and limits are checked.
10765 * @param GCPtrMem The address of the guest memory.
10766 */
10767IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10768{
10769 /* The lazy approach for now... */
10770 uint8_t const *pbSrc;
10771 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10772 if (rc == VINF_SUCCESS)
10773 {
10774 *pbDst = *pbSrc;
10775 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10776 }
10777 return rc;
10778}
10779
10780
10781/**
10782 * Fetches a system table word.
10783 *
10784 * @returns Strict VBox status code.
10785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10786 * @param pu16Dst Where to return the word.
10787 * @param iSegReg The index of the segment register to use for
10788 * this access. The base and limits are checked.
10789 * @param GCPtrMem The address of the guest memory.
10790 */
10791IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10792{
10793 /* The lazy approach for now... */
10794 uint16_t const *pu16Src;
10795 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10796 if (rc == VINF_SUCCESS)
10797 {
10798 *pu16Dst = *pu16Src;
10799 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10800 }
10801 return rc;
10802}
10803
10804
10805/**
10806 * Fetches a system table dword.
10807 *
10808 * @returns Strict VBox status code.
10809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10810 * @param pu32Dst Where to return the dword.
10811 * @param iSegReg The index of the segment register to use for
10812 * this access. The base and limits are checked.
10813 * @param GCPtrMem The address of the guest memory.
10814 */
10815IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10816{
10817 /* The lazy approach for now... */
10818 uint32_t const *pu32Src;
10819 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10820 if (rc == VINF_SUCCESS)
10821 {
10822 *pu32Dst = *pu32Src;
10823 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10824 }
10825 return rc;
10826}
10827
10828
10829/**
10830 * Fetches a system table qword.
10831 *
10832 * @returns Strict VBox status code.
10833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10834 * @param pu64Dst Where to return the qword.
10835 * @param iSegReg The index of the segment register to use for
10836 * this access. The base and limits are checked.
10837 * @param GCPtrMem The address of the guest memory.
10838 */
10839IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10840{
10841 /* The lazy approach for now... */
10842 uint64_t const *pu64Src;
10843 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10844 if (rc == VINF_SUCCESS)
10845 {
10846 *pu64Dst = *pu64Src;
10847 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10848 }
10849 return rc;
10850}
10851
10852
10853/**
10854 * Fetches a descriptor table entry with a caller-specified error code.
10855 *
10856 * @returns Strict VBox status code.
10857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10858 * @param pDesc Where to return the descriptor table entry.
10859 * @param uSel The selector whose table entry to fetch.
10860 * @param uXcpt The exception to raise on table lookup error.
10861 * @param uErrorCode The error code associated with the exception.
10862 */
10863IEM_STATIC VBOXSTRICTRC
10864iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10865{
10866 AssertPtr(pDesc);
10867 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10868
10869 /** @todo did the 286 require all 8 bytes to be accessible? */
10870 /*
10871 * Get the selector table base and check bounds.
10872 */
10873 RTGCPTR GCPtrBase;
10874 if (uSel & X86_SEL_LDT)
10875 {
10876 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10877 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10878 {
10879 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10880 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10881 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10882 uErrorCode, 0);
10883 }
10884
10885 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10886 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10887 }
10888 else
10889 {
10890 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10891 {
10892 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10893 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10894 uErrorCode, 0);
10895 }
10896 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10897 }
10898
10899 /*
10900     * Read the legacy descriptor and, if required, the long mode
10901     * extensions.
10902 */
10903 VBOXSTRICTRC rcStrict;
10904 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10905 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10906 else
10907 {
10908 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10909 if (rcStrict == VINF_SUCCESS)
10910 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10911 if (rcStrict == VINF_SUCCESS)
10912 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10913 if (rcStrict == VINF_SUCCESS)
10914 pDesc->Legacy.au16[3] = 0;
10915 else
10916 return rcStrict;
10917 }
10918
10919 if (rcStrict == VINF_SUCCESS)
10920 {
10921 if ( !IEM_IS_LONG_MODE(pVCpu)
10922 || pDesc->Legacy.Gen.u1DescType)
10923 pDesc->Long.au64[1] = 0;
10924 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10925 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10926 else
10927 {
10928 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10929 /** @todo is this the right exception? */
10930 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10931 }
10932 }
10933 return rcStrict;
10934}
10935
10936
10937/**
10938 * Fetches a descriptor table entry.
10939 *
10940 * @returns Strict VBox status code.
10941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10942 * @param pDesc Where to return the descriptor table entry.
10943 * @param uSel The selector whose table entry to fetch.
10944 * @param uXcpt The exception to raise on table lookup error.
10945 */
10946IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10947{
10948 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10949}
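
/*
 * A rough usage sketch (illustrative only; uSel and the choice of #GP are
 * assumptions, and the exception raising is left as a comment):
 *
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!Desc.Legacy.Gen.u1Present)
 *      {
 *          // raise #NP / #SS here as appropriate for the caller
 *      }
 */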
10950
10951
10952/**
10953 * Fakes a long mode stack descriptor for SS = 0.
10954 *
10955 * @param pDescSs Where to return the fake stack descriptor.
10956 * @param uDpl The DPL we want.
10957 */
10958IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10959{
10960 pDescSs->Long.au64[0] = 0;
10961 pDescSs->Long.au64[1] = 0;
10962 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10963 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10964 pDescSs->Long.Gen.u2Dpl = uDpl;
10965 pDescSs->Long.Gen.u1Present = 1;
10966 pDescSs->Long.Gen.u1Long = 1;
10967}
10968
10969
10970/**
10971 * Marks the selector descriptor as accessed (only non-system descriptors).
10972 *
10973 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10974 * will therefore skip the limit checks.
10975 *
10976 * @returns Strict VBox status code.
10977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10978 * @param uSel The selector.
10979 */
10980IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10981{
10982 /*
10983 * Get the selector table base and calculate the entry address.
10984 */
10985 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10986 ? pVCpu->cpum.GstCtx.ldtr.u64Base
10987 : pVCpu->cpum.GstCtx.gdtr.pGdt;
10988 GCPtr += uSel & X86_SEL_MASK;
10989
10990 /*
10991     * ASMAtomicBitSet will assert if the address is misaligned, so do some
10992     * ugly stuff to avoid this. This also makes sure the access is atomic
10993     * and more or less removes any question of it being an 8-bit or 32-bit access.
10994 */
10995 VBOXSTRICTRC rcStrict;
10996 uint32_t volatile *pu32;
10997 if ((GCPtr & 3) == 0)
10998 {
10999        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11000 GCPtr += 2 + 2;
11001 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11002 if (rcStrict != VINF_SUCCESS)
11003 return rcStrict;
11004        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11005 }
11006 else
11007 {
11008 /* The misaligned GDT/LDT case, map the whole thing. */
11009 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11010 if (rcStrict != VINF_SUCCESS)
11011 return rcStrict;
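        /*
         * The accessed flag is bit 40 of the descriptor (byte 5, bit 0).  A sketch
         * of the arithmetic below: advance the byte pointer until the host address
         * is 4-byte aligned again and subtract 8 bits per byte skipped, e.g. for
         * (addr & 3) == 1 we step 3 bytes ahead and set bit 40 - 24 = 16.
         */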
11012 switch ((uintptr_t)pu32 & 3)
11013 {
11014 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11015 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11016 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11017 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11018 }
11019 }
11020
11021 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11022}
11023
11024/** @} */
11025
11026
11027/*
11028 * Include the C/C++ implementation of instructions.
11029 */
11030#include "IEMAllCImpl.cpp.h"
11031
11032
11033
11034/** @name "Microcode" macros.
11035 *
11036 * The idea is that we should be able to use the same code to interpret
11037 * instructions as well as to recompile them. Thus this obfuscation.
11038 *
11039 * @{
11040 */
11041#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11042#define IEM_MC_END() }
11043#define IEM_MC_PAUSE() do {} while (0)
11044#define IEM_MC_CONTINUE() do {} while (0)
11045
11046/** Internal macro. */
11047#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11048 do \
11049 { \
11050 VBOXSTRICTRC rcStrict2 = a_Expr; \
11051 if (rcStrict2 != VINF_SUCCESS) \
11052 return rcStrict2; \
11053 } while (0)
11054
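/*
 * A rough sketch of how an instruction body strings these macros together,
 * using macros defined further down (illustrative only; the register indices
 * are assumptions, not an actual decoder function from this file):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX);   // read CX
 *      IEM_MC_ADD_GREG_U16(X86_GREG_xAX, u16Value);     // AX += CX (EFLAGS untouched)
 *      IEM_MC_ADVANCE_RIP();                            // step past the instruction
 *      IEM_MC_END();
 */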
11055
11056#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11057#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11058#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11059#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11060#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11061#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11062#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11063#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11064#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11065 do { \
11066 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11067 return iemRaiseDeviceNotAvailable(pVCpu); \
11068 } while (0)
11069#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11070 do { \
11071 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11072 return iemRaiseDeviceNotAvailable(pVCpu); \
11073 } while (0)
11074#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11075 do { \
11076 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11077 return iemRaiseMathFault(pVCpu); \
11078 } while (0)
11079#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11080 do { \
11081 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11082 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11083 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11084 return iemRaiseUndefinedOpcode(pVCpu); \
11085 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11086 return iemRaiseDeviceNotAvailable(pVCpu); \
11087 } while (0)
11088#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11089 do { \
11090 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11091 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11092 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11093 return iemRaiseUndefinedOpcode(pVCpu); \
11094 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11095 return iemRaiseDeviceNotAvailable(pVCpu); \
11096 } while (0)
11097#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11098 do { \
11099 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11100 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11101 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11102 return iemRaiseUndefinedOpcode(pVCpu); \
11103 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11104 return iemRaiseDeviceNotAvailable(pVCpu); \
11105 } while (0)
11106#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11107 do { \
11108 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11109 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11110 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11111 return iemRaiseUndefinedOpcode(pVCpu); \
11112 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11113 return iemRaiseDeviceNotAvailable(pVCpu); \
11114 } while (0)
11115#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11116 do { \
11117 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11118 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11119 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11120 return iemRaiseUndefinedOpcode(pVCpu); \
11121 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11122 return iemRaiseDeviceNotAvailable(pVCpu); \
11123 } while (0)
11124#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11125 do { \
11126 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11127 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11128 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11129 return iemRaiseUndefinedOpcode(pVCpu); \
11130 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11131 return iemRaiseDeviceNotAvailable(pVCpu); \
11132 } while (0)
11133#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11134 do { \
11135 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11136 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11137 return iemRaiseUndefinedOpcode(pVCpu); \
11138 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11139 return iemRaiseDeviceNotAvailable(pVCpu); \
11140 } while (0)
11141#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11142 do { \
11143 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11144 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11145 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11146 return iemRaiseUndefinedOpcode(pVCpu); \
11147 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11148 return iemRaiseDeviceNotAvailable(pVCpu); \
11149 } while (0)
11150#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11151 do { \
11152 if (pVCpu->iem.s.uCpl != 0) \
11153 return iemRaiseGeneralProtectionFault0(pVCpu); \
11154 } while (0)
11155#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11156 do { \
11157 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11158 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11159 } while (0)
11160#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11161 do { \
11162 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11163 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11164 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11165 return iemRaiseUndefinedOpcode(pVCpu); \
11166 } while (0)
11167#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11168 do { \
11169 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11170 return iemRaiseGeneralProtectionFault0(pVCpu); \
11171 } while (0)
11172
11173
11174#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11175#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11176#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11177#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11178#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11179#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11180#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11181 uint32_t a_Name; \
11182 uint32_t *a_pName = &a_Name
11183#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11184 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11185
11186#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11187#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11188
11189#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11190#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11191#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11192#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11193#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11194#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11195#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11196#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11197#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11198#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11199#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11200#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11201#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11202#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11203#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11204#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11205#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11206#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11207 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11208 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11209 } while (0)
11210#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11211 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11212 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11213 } while (0)
11214#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11215 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11216 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11217 } while (0)
11218/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11219#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11220 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11221 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11222 } while (0)
11223#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11224 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11225 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11226 } while (0)
11227/** @note Not for IOPL or IF testing or modification. */
11228#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11229#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11230#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11231#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11232
11233#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11234#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11235#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11236#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11237#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11238#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11239#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11240#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11241#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11242#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11243/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11244#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11245 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11246 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11247 } while (0)
11248#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11249 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11250 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11251 } while (0)
11252#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11253 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11254
11255
11256#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11257#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11258/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11259 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11260#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11261#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11262/** @note Not for IOPL or IF testing or modification. */
11263#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11264
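/*
 * A rough sketch of the pattern the IEM_MC_REF_GREG_U32 note asks for:
 * clearing the upper half after a 32-bit update through the reference
 * (illustrative only; the register index and the update are assumptions):
 *
 *      uint32_t *pu32Dst;
 *      IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xDX);
 *      *pu32Dst |= UINT32_C(1);                        // some 32-bit modification via the reference
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);     // zero bits 63:32 as 64-bit mode requires
 */
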
11265#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11266#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11267#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11268 do { \
11269 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11270 *pu32Reg += (a_u32Value); \
11271        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11272 } while (0)
11273#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11274
11275#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11276#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11277#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11278 do { \
11279 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11280 *pu32Reg -= (a_u32Value); \
11281        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11282 } while (0)
11283#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11284#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11285
11286#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11287#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11288#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11289#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11290#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11291#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11292#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11293
11294#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11295#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11296#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11297#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11298
11299#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11300#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11301#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11302
11303#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11304#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11305#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11306
11307#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11308#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11309#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11310
11311#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11312#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11313#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11314
11315#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11316
11317#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11318
11319#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11320#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11321#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11322 do { \
11323 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11324 *pu32Reg &= (a_u32Value); \
11325        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11326 } while (0)
11327#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11328
11329#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11330#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11331#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11332 do { \
11333 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11334 *pu32Reg |= (a_u32Value); \
11335        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11336 } while (0)
11337#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11338
11339
11340/** @note Not for IOPL or IF modification. */
11341#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11342/** @note Not for IOPL or IF modification. */
11343#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11344/** @note Not for IOPL or IF modification. */
11345#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11346
11347#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11348
11349/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0, i.e. all tags valid) if necessary. */
11350#define IEM_MC_FPU_TO_MMX_MODE() do { \
11351 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11352 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11353 } while (0)
11354
11355/** Switches the FPU state from MMX mode (FTW=0xffff, i.e. all tags empty). */
11356#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11357 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11358 } while (0)
11359
11360#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11361 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11362#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11363 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11364#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11365 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11366 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11367 } while (0)
11368#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11369 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11370 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11371 } while (0)
11372#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11373 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11374#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11375 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11376#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11377 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11378
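/*
 * A rough sketch of a movq-style MMX register copy using the macros above
 * (illustrative only; register indices are assumptions and the usual FPU
 * bookkeeping is omitted):
 *
 *      uint64_t u64Tmp;
 *      IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();   // #UD / #NM checks first
 *      IEM_MC_FPU_TO_MMX_MODE();                // TOS=0, all tags valid
 *      IEM_MC_FETCH_MREG_U64(u64Tmp, 1);        // read MM1
 *      IEM_MC_STORE_MREG_U64(0, u64Tmp);        // write MM0
 */
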
11379#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11380 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11381 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11382 } while (0)
11383#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11384 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11385#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11386 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11387#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11388 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11389#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11390 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11391 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11392 } while (0)
11393#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11394 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11395#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11396 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11397 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11398 } while (0)
11399#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11400 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11401#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11402 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11403 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11404 } while (0)
11405#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11406 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11407#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11408 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11409#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11410 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11411#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11412 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11413#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11414 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11415 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11416 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11417 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11418 } while (0)
11419
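/*
 * A rough sketch of a movdqa-like register-to-register copy using the XMM
 * macros above (illustrative only; register indices are assumptions):
 *
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();  // #UD / #NM checks first
 *      IEM_MC_COPY_XREG_U128(0, 1);             // XMM0 <- XMM1
 *      IEM_MC_ADVANCE_RIP();
 */
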
11420#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11421 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11422 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11423 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11424 } while (0)
11425#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11426 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11427 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11428 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11429 } while (0)
11430#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11431 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11432 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11433 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11434 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11435 } while (0)
11436#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11437 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11438 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11439 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11440 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11441 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11442 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11443 } while (0)
11444
11445#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11446#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11447 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11448 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11449 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11450 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11451 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11452 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11453 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11454 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11455 } while (0)
11456#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11457 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11458 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11459 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11460 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11461 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11462 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11463 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11464 } while (0)
11465#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11466 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11467 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11468 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11469 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11470 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11471 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11472 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11473 } while (0)
11474#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11475 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11476 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11477 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11478 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11479 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11480 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11481 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11482 } while (0)
11483
11484#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11485 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11486#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11487 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11488#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11489 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11490#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11491 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11492 uintptr_t const iYRegTmp = (a_iYReg); \
11493 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11494 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11495 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11496 } while (0)
11497
11498#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11499 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11500 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11501 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11502 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11503 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11504 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11505 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11506 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11507 } while (0)
11508#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11509 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11510 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11511 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11512 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11513 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11514 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11515 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11516 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11517 } while (0)
11518#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11519 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11520 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11521 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11522 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11523 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11524 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11525 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11526 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11527 } while (0)
11528
11529#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11530 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11531 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11532 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11533 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11534 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11535 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11536 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11537 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11538 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11539 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11540 } while (0)
11541#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11542 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11543 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11544 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11545 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11546 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11547 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11548 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11549 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11550 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11551 } while (0)
11552#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11553 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11554 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11555 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11556 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11557 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11558 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11559 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11560 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11561 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11562 } while (0)
11563#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11564 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11565 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11566 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11567 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11568 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11569 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11570 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11571 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11572 } while (0)
11573
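/*
 * A rough sketch of a VEX.128 register move using the YMM macros above
 * (illustrative only; register indices are assumptions).  Note how the
 * ..._ZX_VLMAX variants zero bits 255:128, as VEX.128 encodings require:
 *
 *      IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();   // #UD / #NM checks first
 *      IEM_MC_COPY_YREG_U128_ZX_VLMAX(0, 1);    // YMM0[127:0] <- YMM1[127:0], upper half cleared
 *      IEM_MC_ADVANCE_RIP();
 */
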
11574#ifndef IEM_WITH_SETJMP
11575# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11576 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11577# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11578 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11579# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11580 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11581#else
11582# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11583 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11584# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11585 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11586# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11587 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11588#endif
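
/*
 * A note on the two build modes: without IEM_WITH_SETJMP each IEM_MC_FETCH_MEM_*
 * wraps a status-code returning fetcher in IEM_MC_RETURN_ON_FAILURE, whereas with
 * IEM_WITH_SETJMP it expands to the *Jmp variant that longjmps on failure.  An
 * instruction body uses the macro identically either way, e.g. (the segment and
 * address variables are the usual decoder-provided values, shown as assumptions):
 *
 *      uint16_t u16Value;
 *      IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 */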
11589
11590#ifndef IEM_WITH_SETJMP
11591# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11592 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11593# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11594 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11595# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11596 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11597#else
11598# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11599 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11600# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11601 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11602# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11603 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11604#endif
11605
11606#ifndef IEM_WITH_SETJMP
11607# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11608 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11609# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11610 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11611# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11612 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11613#else
11614# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11615 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11616# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11617 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11618# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11619 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11620#endif
11621
11622#ifdef SOME_UNUSED_FUNCTION
11623# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11624 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11625#endif
11626
11627#ifndef IEM_WITH_SETJMP
11628# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11629 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11630# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11631 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11632# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11633 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11634# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11635 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11636#else
11637# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11638 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11639# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11640 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11641# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11642 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11643# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11644 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11645#endif
11646
11647#ifndef IEM_WITH_SETJMP
11648# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11650# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11651 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11652# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11653 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11654#else
11655# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11656 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11657# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11658 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11659# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11660 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11661#endif
11662
11663#ifndef IEM_WITH_SETJMP
11664# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11666# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11667 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11668#else
11669# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11670 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11671# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11672 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11673#endif
11674
11675#ifndef IEM_WITH_SETJMP
11676# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11678# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11680#else
11681# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11682 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11683# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11684 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11685#endif
11686
11687
11688
11689#ifndef IEM_WITH_SETJMP
11690# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11691 do { \
11692 uint8_t u8Tmp; \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11694 (a_u16Dst) = u8Tmp; \
11695 } while (0)
11696# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11697 do { \
11698 uint8_t u8Tmp; \
11699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11700 (a_u32Dst) = u8Tmp; \
11701 } while (0)
11702# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11703 do { \
11704 uint8_t u8Tmp; \
11705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11706 (a_u64Dst) = u8Tmp; \
11707 } while (0)
11708# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11709 do { \
11710 uint16_t u16Tmp; \
11711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11712 (a_u32Dst) = u16Tmp; \
11713 } while (0)
11714# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11715 do { \
11716 uint16_t u16Tmp; \
11717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11718 (a_u64Dst) = u16Tmp; \
11719 } while (0)
11720# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11721 do { \
11722 uint32_t u32Tmp; \
11723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11724 (a_u64Dst) = u32Tmp; \
11725 } while (0)
11726#else /* IEM_WITH_SETJMP */
11727# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11728 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11729# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11730 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11731# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11732 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11733# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11734 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11735# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11736 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11737# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11738 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11739#endif /* IEM_WITH_SETJMP */
11740
11741#ifndef IEM_WITH_SETJMP
11742# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11743 do { \
11744 uint8_t u8Tmp; \
11745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11746 (a_u16Dst) = (int8_t)u8Tmp; \
11747 } while (0)
11748# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11749 do { \
11750 uint8_t u8Tmp; \
11751 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11752 (a_u32Dst) = (int8_t)u8Tmp; \
11753 } while (0)
11754# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11755 do { \
11756 uint8_t u8Tmp; \
11757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11758 (a_u64Dst) = (int8_t)u8Tmp; \
11759 } while (0)
11760# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11761 do { \
11762 uint16_t u16Tmp; \
11763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11764 (a_u32Dst) = (int16_t)u16Tmp; \
11765 } while (0)
11766# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11767 do { \
11768 uint16_t u16Tmp; \
11769 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11770 (a_u64Dst) = (int16_t)u16Tmp; \
11771 } while (0)
11772# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11773 do { \
11774 uint32_t u32Tmp; \
11775 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11776 (a_u64Dst) = (int32_t)u32Tmp; \
11777 } while (0)
11778#else /* IEM_WITH_SETJMP */
11779# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11780 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11781# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11782 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11783# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11784 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11785# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11786 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11787# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11788 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11789# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11790 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11791#endif /* IEM_WITH_SETJMP */
11792
11793#ifndef IEM_WITH_SETJMP
11794# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11795 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11796# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11797 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11798# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11799 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11800# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11801 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11802#else
11803# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11804 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11805# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11806 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11807# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11808 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11809# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11810 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11811#endif
11812
11813#ifndef IEM_WITH_SETJMP
11814# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11815 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11816# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11817 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11818# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11819 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11820# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11821 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11822#else
11823# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11824 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11825# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11826 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11827# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11828 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11829# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11830 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11831#endif
11832
11833#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11834#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11835#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11836#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11837#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11838#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11839#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11840 do { \
11841 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11842 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11843 } while (0)
11844
11845#ifndef IEM_WITH_SETJMP
11846# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11847 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11848# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11849 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11850#else
11851# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11852 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11853# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11854 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11855#endif
11856
11857#ifndef IEM_WITH_SETJMP
11858# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11859 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11860# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11861 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11862#else
11863# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11864 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11865# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11866 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11867#endif
11868
11869
11870#define IEM_MC_PUSH_U16(a_u16Value) \
11871 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11872#define IEM_MC_PUSH_U32(a_u32Value) \
11873 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11874#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11875 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11876#define IEM_MC_PUSH_U64(a_u64Value) \
11877 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11878
11879#define IEM_MC_POP_U16(a_pu16Value) \
11880 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11881#define IEM_MC_POP_U32(a_pu32Value) \
11882 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11883#define IEM_MC_POP_U64(a_pu64Value) \
11884 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
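/* Illustrative usage sketch (added note, not from the original source): the
 * stack macros are used inside an IEM_MC block much like a push imm16
 * decoder would, roughly:
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_PUSH_U16(u16Imm);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * u16Imm stands for an immediate fetched earlier with IEM_OPCODE_GET_NEXT_U16.
 */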
11885
11886/** Maps guest memory for direct or bounce buffered access.
11887 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11888 * @remarks May return.
11889 */
11890#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11891 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11892
11893/** Maps guest memory for direct or bounce buffered access.
11894 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11895 * @remarks May return.
11896 */
11897#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11898 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11899
11900/** Commits the memory and unmaps the guest memory.
11901 * @remarks May return.
11902 */
11903#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11904 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
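/* Illustrative usage sketch (added note, not from the original source): a
 * read-modify-write memory operand is typically mapped, handed to a worker
 * by reference and then committed, roughly:
 *
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_CALL_VOID_AIMPL_3(pfnWorker, pu32Dst, u32Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *
 * pu32Dst, u32Src, pEFlags and pfnWorker are placeholders for IEM_MC_ARG /
 * IEM_MC_LOCAL declarations made earlier in the block.
 */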
11905
11906/** Commits the memory and unmaps the guest memory unless the FPU status word
11907 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11908 * would cause FLD not to store.
11909 *
11910 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11911 * store, while \#P will not.
11912 *
11913 * @remarks May in theory return - for now.
11914 */
11915#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11916 do { \
11917 if ( !(a_u16FSW & X86_FSW_ES) \
11918 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11919 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11920 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11921 } while (0)
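/* Worked example of the condition above (added note, values illustrative):
 * with a_u16FSW = 0x0241 the X86_FSW_ES bit (0x0080) is clear, so the commit
 * always happens.  With a_u16FSW = 0x0081 (ES + IE) and the power-on default
 * FCW = 0x037F, IE is masked out by X86_FCW_MASK_ALL and the commit still
 * happens; only when the corresponding FCW mask bit (here X86_FCW_IM) is
 * clear does the pending exception suppress the store.
 */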
11922
11923/** Calculate efficient address from R/M. */
11924#ifndef IEM_WITH_SETJMP
11925# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11926 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11927#else
11928# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11929 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11930#endif
11931
11932#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11933#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11934#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11935#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11936#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11937#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11938#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11939
11940/**
11941 * Defers the rest of the instruction emulation to a C implementation routine
11942 * and returns, only taking the standard parameters.
11943 *
11944 * @param a_pfnCImpl The pointer to the C routine.
11945 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11946 */
11947#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11948
11949/**
11950 * Defers the rest of instruction emulation to a C implementation routine and
11951 * returns, taking one argument in addition to the standard ones.
11952 *
11953 * @param a_pfnCImpl The pointer to the C routine.
11954 * @param a0 The argument.
11955 */
11956#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11957
11958/**
11959 * Defers the rest of the instruction emulation to a C implementation routine
11960 * and returns, taking two arguments in addition to the standard ones.
11961 *
11962 * @param a_pfnCImpl The pointer to the C routine.
11963 * @param a0 The first extra argument.
11964 * @param a1 The second extra argument.
11965 */
11966#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11967
11968/**
11969 * Defers the rest of the instruction emulation to a C implementation routine
11970 * and returns, taking three arguments in addition to the standard ones.
11971 *
11972 * @param a_pfnCImpl The pointer to the C routine.
11973 * @param a0 The first extra argument.
11974 * @param a1 The second extra argument.
11975 * @param a2 The third extra argument.
11976 */
11977#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11978
11979/**
11980 * Defers the rest of the instruction emulation to a C implementation routine
11981 * and returns, taking four arguments in addition to the standard ones.
11982 *
11983 * @param a_pfnCImpl The pointer to the C routine.
11984 * @param a0 The first extra argument.
11985 * @param a1 The second extra argument.
11986 * @param a2 The third extra argument.
11987 * @param a3 The fourth extra argument.
11988 */
11989#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11990
11991/**
11992 * Defers the rest of the instruction emulation to a C implementation routine
11993 * and returns, taking five arguments in addition to the standard ones.
11994 *
11995 * @param a_pfnCImpl The pointer to the C routine.
11996 * @param a0 The first extra argument.
11997 * @param a1 The second extra argument.
11998 * @param a2 The third extra argument.
11999 * @param a3 The fourth extra argument.
12000 * @param a4 The fifth extra argument.
12001 */
12002#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
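/* Illustrative usage sketch (added note): a decoder that has gathered its
 * operands typically ends with one of these, e.g. a far jump roughly does
 *
 *     IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, enmEffOpSize);
 *
 * inside its IEM_MC block; the macro supplies the return itself and passes
 * pVCpu plus the instruction length implicitly.  The worker name and argument
 * names are assumptions borrowed from the wider IEM sources.
 */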
12003
12004/**
12005 * Defers the entire instruction emulation to a C implementation routine and
12006 * returns, only taking the standard parameters.
12007 *
12008 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12009 *
12010 * @param a_pfnCImpl The pointer to the C routine.
12011 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12012 */
12013#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12014
12015/**
12016 * Defers the entire instruction emulation to a C implementation routine and
12017 * returns, taking one argument in addition to the standard ones.
12018 *
12019 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12020 *
12021 * @param a_pfnCImpl The pointer to the C routine.
12022 * @param a0 The argument.
12023 */
12024#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12025
12026/**
12027 * Defers the entire instruction emulation to a C implementation routine and
12028 * returns, taking two arguments in addition to the standard ones.
12029 *
12030 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12031 *
12032 * @param a_pfnCImpl The pointer to the C routine.
12033 * @param a0 The first extra argument.
12034 * @param a1 The second extra argument.
12035 */
12036#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12037
12038/**
12039 * Defers the entire instruction emulation to a C implementation routine and
12040 * returns, taking three arguments in addition to the standard ones.
12041 *
12042 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12043 *
12044 * @param a_pfnCImpl The pointer to the C routine.
12045 * @param a0 The first extra argument.
12046 * @param a1 The second extra argument.
12047 * @param a2 The third extra argument.
12048 */
12049#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
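/* Illustrative usage sketch (added note): unlike the IEM_MC_CALL_CIMPL_*
 * family these are used without an IEM_MC block, e.g. an opcode routine for
 * HLT can boil down to roughly
 *
 *     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *
 * iemCImpl_hlt is assumed here from the wider IEM sources.
 */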
12050
12051/**
12052 * Calls a FPU assembly implementation taking one visible argument.
12053 *
12054 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12055 * @param a0 The first extra argument.
12056 */
12057#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12058 do { \
12059 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12060 } while (0)
12061
12062/**
12063 * Calls a FPU assembly implementation taking two visible arguments.
12064 *
12065 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12066 * @param a0 The first extra argument.
12067 * @param a1 The second extra argument.
12068 */
12069#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12070 do { \
12071 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12072 } while (0)
12073
12074/**
12075 * Calls a FPU assembly implementation taking three visible arguments.
12076 *
12077 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12078 * @param a0 The first extra argument.
12079 * @param a1 The second extra argument.
12080 * @param a2 The third extra argument.
12081 */
12082#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12083 do { \
12084 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12085 } while (0)
12086
12087#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12088 do { \
12089 (a_FpuData).FSW = (a_FSW); \
12090 (a_FpuData).r80Result = *(a_pr80Value); \
12091 } while (0)
12092
12093/** Pushes FPU result onto the stack. */
12094#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12095 iemFpuPushResult(pVCpu, &a_FpuData)
12096/** Pushes FPU result onto the stack and sets the FPUDP. */
12097#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12098 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12099
12100/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12101#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12102 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12103
12104/** Stores FPU result in a stack register. */
12105#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12106 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12107/** Stores FPU result in a stack register and pops the stack. */
12108#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12109 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12110/** Stores FPU result in a stack register and sets the FPUDP. */
12111#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12112 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12113/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12114 * stack. */
12115#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12116 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
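/* Illustrative flow sketch (added note, not from the original source): an
 * FLD m32fp style instruction typically strings the call and result macros
 * together as
 *
 *     IEM_MC_CALL_FPU_AIMPL_2(pfnLoadWorker, pFpuRes, pr32Val);
 *     IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *
 * where FpuRes/pFpuRes and pr32Val come from IEM_MC_LOCAL / IEM_MC_ARG
 * declarations and pfnLoadWorker is a placeholder for the relevant
 * iemAImpl_* routine.
 */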
12117
12118/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12119#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12120 iemFpuUpdateOpcodeAndIp(pVCpu)
12121/** Free a stack register (for FFREE and FFREEP). */
12122#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12123 iemFpuStackFree(pVCpu, a_iStReg)
12124/** Increment the FPU stack pointer. */
12125#define IEM_MC_FPU_STACK_INC_TOP() \
12126 iemFpuStackIncTop(pVCpu)
12127/** Decrement the FPU stack pointer. */
12128#define IEM_MC_FPU_STACK_DEC_TOP() \
12129 iemFpuStackDecTop(pVCpu)
12130
12131/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12132#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12133 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12134/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12135#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12136 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12137/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12138#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12139 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12140/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12141#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12142 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12143/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12144 * stack. */
12145#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12146 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12147/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12148#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12149 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12150
12151/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12152#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12153 iemFpuStackUnderflow(pVCpu, a_iStDst)
12154/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12155 * stack. */
12156#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12157 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12158/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12159 * FPUDS. */
12160#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12161 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12162/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12163 * FPUDS. Pops stack. */
12164#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12165 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12166/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12167 * stack twice. */
12168#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12169 iemFpuStackUnderflowThenPopPop(pVCpu)
12170/** Raises a FPU stack underflow exception for an instruction pushing a result
12171 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12172#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12173 iemFpuStackPushUnderflow(pVCpu)
12174/** Raises a FPU stack underflow exception for an instruction pushing a result
12175 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12176#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12177 iemFpuStackPushUnderflowTwo(pVCpu)
12178
12179/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12180 * FPUIP, FPUCS and FOP. */
12181#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12182 iemFpuStackPushOverflow(pVCpu)
12183/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12184 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12185#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12186 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12187/** Prepares for using the FPU state.
12188 * Ensures that we can use the host FPU in the current context (RC+R0).
12189 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12190#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12191/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12192#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12193/** Actualizes the guest FPU state so it can be accessed and modified. */
12194#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12195
12196/** Prepares for using the SSE state.
12197 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12198 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12199#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12200/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12201#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12202/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12203#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12204
12205/** Prepares for using the AVX state.
12206 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12207 * Ensures the guest AVX state in the CPUMCTX is up to date.
12208 * @note This will include the AVX512 state too when support for it is added
12209 * due to the zero-extending feature of VEX instructions. */
12210#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12211/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12212#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12213/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12214#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12215
12216/**
12217 * Calls a MMX assembly implementation taking two visible arguments.
12218 *
12219 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12220 * @param a0 The first extra argument.
12221 * @param a1 The second extra argument.
12222 */
12223#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12224 do { \
12225 IEM_MC_PREPARE_FPU_USAGE(); \
12226 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12227 } while (0)
12228
12229/**
12230 * Calls a MMX assembly implementation taking three visible arguments.
12231 *
12232 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12233 * @param a0 The first extra argument.
12234 * @param a1 The second extra argument.
12235 * @param a2 The third extra argument.
12236 */
12237#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12238 do { \
12239 IEM_MC_PREPARE_FPU_USAGE(); \
12240 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12241 } while (0)
12242
12243
12244/**
12245 * Calls a SSE assembly implementation taking two visible arguments.
12246 *
12247 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12248 * @param a0 The first extra argument.
12249 * @param a1 The second extra argument.
12250 */
12251#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12252 do { \
12253 IEM_MC_PREPARE_SSE_USAGE(); \
12254 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12255 } while (0)
12256
12257/**
12258 * Calls a SSE assembly implementation taking three visible arguments.
12259 *
12260 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12261 * @param a0 The first extra argument.
12262 * @param a1 The second extra argument.
12263 * @param a2 The third extra argument.
12264 */
12265#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12266 do { \
12267 IEM_MC_PREPARE_SSE_USAGE(); \
12268 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12269 } while (0)
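/* Illustrative usage sketch (added note): a packed SSE binary op typically
 * hands references to the destination and source XMM registers to the
 * assembly worker, roughly
 *
 *     IEM_MC_CALL_SSE_AIMPL_2(pfnSseWorker, puDst, puSrc);
 *
 * where puDst and puSrc are IEM_MC_ARG style references set up earlier in
 * the block; pfnSseWorker is a placeholder for this note.
 */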
12270
12271
12272/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12273 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12274#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12275 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12276
12277/**
12278 * Calls a AVX assembly implementation taking two visible arguments.
12279 *
12280 * There is one implicit zero'th argument, a pointer to the extended state.
12281 *
12282 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12283 * @param a1 The first extra argument.
12284 * @param a2 The second extra argument.
12285 */
12286#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12287 do { \
12288 IEM_MC_PREPARE_AVX_USAGE(); \
12289 a_pfnAImpl(pXState, (a1), (a2)); \
12290 } while (0)
12291
12292/**
12293 * Calls a AVX assembly implementation taking three visible arguments.
12294 *
12295 * There is one implicit zero'th argument, a pointer to the extended state.
12296 *
12297 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12298 * @param a1 The first extra argument.
12299 * @param a2 The second extra argument.
12300 * @param a3 The third extra argument.
12301 */
12302#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12303 do { \
12304 IEM_MC_PREPARE_AVX_USAGE(); \
12305 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12306 } while (0)
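/* Illustrative usage sketch (added note): the implicit zero'th argument is
 * declared with IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() earlier in the block, after
 * which a call looks roughly like
 *
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *     ...
 *     IEM_MC_CALL_AVX_AIMPL_2(pfnAvxWorker, puDst, puSrc);
 *
 * pfnAvxWorker, puDst and puSrc are placeholders for this note.
 */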
12307
12308/** @note Not for IOPL or IF testing. */
12309#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12310/** @note Not for IOPL or IF testing. */
12311#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12312/** @note Not for IOPL or IF testing. */
12313#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12314/** @note Not for IOPL or IF testing. */
12315#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12316/** @note Not for IOPL or IF testing. */
12317#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12318 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12319 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12320/** @note Not for IOPL or IF testing. */
12321#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12322 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12323 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12324/** @note Not for IOPL or IF testing. */
12325#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12326 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12327 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12328 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12329/** @note Not for IOPL or IF testing. */
12330#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12331 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12332 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12333 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12334#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12335#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12336#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12337/** @note Not for IOPL or IF testing. */
12338#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12339 if ( pVCpu->cpum.GstCtx.cx != 0 \
12340 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12341/** @note Not for IOPL or IF testing. */
12342#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12343 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12344 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12345/** @note Not for IOPL or IF testing. */
12346#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12347 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12348 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12349/** @note Not for IOPL or IF testing. */
12350#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12351 if ( pVCpu->cpum.GstCtx.cx != 0 \
12352 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12353/** @note Not for IOPL or IF testing. */
12354#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12355 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12356 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12357/** @note Not for IOPL or IF testing. */
12358#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12359 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12360 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12361#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12362#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12363
12364#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12365 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12366#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12367 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12368#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12369 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12370#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12371 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12372#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12373 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12374#define IEM_MC_IF_FCW_IM() \
12375 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12376
12377#define IEM_MC_ELSE() } else {
12378#define IEM_MC_ENDIF() } do {} while (0)
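/* Illustrative usage sketch (added note): the IEM_MC_IF_* macros open a brace
 * that IEM_MC_ELSE / IEM_MC_ENDIF close again, so a CMOVcc style sequence
 * reads roughly
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_STORE_GREG_U32(iGRegDst, u32Src);
 *     IEM_MC_ELSE()
 *         IEM_MC_CLEAR_HIGH_GREG_U64(iGRegDst);
 *     IEM_MC_ENDIF();
 *
 * IEM_MC_STORE_GREG_U32, IEM_MC_CLEAR_HIGH_GREG_U64 and the operand names are
 * taken as assumptions from the wider IEM sources.
 */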
12379
12380/** @} */
12381
12382
12383/** @name Opcode Debug Helpers.
12384 * @{
12385 */
12386#ifdef VBOX_WITH_STATISTICS
12387# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12388#else
12389# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12390#endif
12391
12392#ifdef DEBUG
12393# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12394 do { \
12395 IEMOP_INC_STATS(a_Stats); \
12396 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12397 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12398 } while (0)
12399
12400# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12401 do { \
12402 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12403 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12404 (void)RT_CONCAT(OP_,a_Upper); \
12405 (void)(a_fDisHints); \
12406 (void)(a_fIemHints); \
12407 } while (0)
12408
12409# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12410 do { \
12411 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12412 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12413 (void)RT_CONCAT(OP_,a_Upper); \
12414 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12415 (void)(a_fDisHints); \
12416 (void)(a_fIemHints); \
12417 } while (0)
12418
12419# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12420 do { \
12421 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12422 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12423 (void)RT_CONCAT(OP_,a_Upper); \
12424 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12425 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12426 (void)(a_fDisHints); \
12427 (void)(a_fIemHints); \
12428 } while (0)
12429
12430# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12431 do { \
12432 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12433 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12434 (void)RT_CONCAT(OP_,a_Upper); \
12435 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12436 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12437 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12438 (void)(a_fDisHints); \
12439 (void)(a_fIemHints); \
12440 } while (0)
12441
12442# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12443 do { \
12444 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12445 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12446 (void)RT_CONCAT(OP_,a_Upper); \
12447 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12448 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12449 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12450 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12451 (void)(a_fDisHints); \
12452 (void)(a_fIemHints); \
12453 } while (0)
12454
12455#else
12456# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12457
12458# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12459 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12460# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12461 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12462# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12463 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12464# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12465 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12466# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12467 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12468
12469#endif
12470
12471#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12472 IEMOP_MNEMONIC0EX(a_Lower, \
12473 #a_Lower, \
12474 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12475#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12476 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12477 #a_Lower " " #a_Op1, \
12478 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12479#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12480 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12481 #a_Lower " " #a_Op1 "," #a_Op2, \
12482 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12483#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12484 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12485 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12486 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12487#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12488 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12489 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12490 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
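/* Illustrative usage sketch (added note): an opcode decoder normally opens
 * with one of these, e.g. something along the lines of
 *
 *     IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *
 * which bumps the "add_Gv_Ev" statistics counter and, in debug builds, emits
 * the Log4 decode line.  The form and operand constants shown are assumptions
 * borrowed from the instruction decoder files.
 */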
12491
12492/** @} */
12493
12494
12495/** @name Opcode Helpers.
12496 * @{
12497 */
12498
12499#ifdef IN_RING3
12500# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12501 do { \
12502 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12503 else \
12504 { \
12505 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12506 return IEMOP_RAISE_INVALID_OPCODE(); \
12507 } \
12508 } while (0)
12509#else
12510# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12511 do { \
12512 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12513 else return IEMOP_RAISE_INVALID_OPCODE(); \
12514 } while (0)
12515#endif
12516
12517/** The instruction requires a 186 or later. */
12518#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12519# define IEMOP_HLP_MIN_186() do { } while (0)
12520#else
12521# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12522#endif
12523
12524/** The instruction requires a 286 or later. */
12525#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12526# define IEMOP_HLP_MIN_286() do { } while (0)
12527#else
12528# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12529#endif
12530
12531/** The instruction requires a 386 or later. */
12532#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12533# define IEMOP_HLP_MIN_386() do { } while (0)
12534#else
12535# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12536#endif
12537
12538/** The instruction requires a 386 or later if the given expression is true. */
12539#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12540# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12541#else
12542# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12543#endif
12544
12545/** The instruction requires a 486 or later. */
12546#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12547# define IEMOP_HLP_MIN_486() do { } while (0)
12548#else
12549# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12550#endif
12551
12552/** The instruction requires a Pentium (586) or later. */
12553#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12554# define IEMOP_HLP_MIN_586() do { } while (0)
12555#else
12556# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12557#endif
12558
12559/** The instruction requires a PentiumPro (686) or later. */
12560#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12561# define IEMOP_HLP_MIN_686() do { } while (0)
12562#else
12563# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12564#endif
12565
12566
12567/** The instruction raises an \#UD in real and V8086 mode. */
12568#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12569 do \
12570 { \
12571 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12572 else return IEMOP_RAISE_INVALID_OPCODE(); \
12573 } while (0)
12574
12575#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12576/** The instruction raises an \#UD in real and V8086 mode, or when in long mode
12577 * without using a 64-bit code segment (applicable to all VMX instructions
12578 * except VMCALL).
12579 */
12580#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12581 do \
12582 { \
12583 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12584 && ( !IEM_IS_LONG_MODE(pVCpu) \
12585 || IEM_IS_64BIT_CODE(pVCpu))) \
12586 { /* likely */ } \
12587 else \
12588 { \
12589 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12590 { \
12591 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12592 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12593 return IEMOP_RAISE_INVALID_OPCODE(); \
12594 } \
12595 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12596 { \
12597 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12598 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12599 return IEMOP_RAISE_INVALID_OPCODE(); \
12600 } \
12601 } \
12602 } while (0)
12603
12604/** The instruction can only be executed in VMX operation (VMX root mode and
12605 * non-root mode).
12606 *
12607 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12608 */
12609# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12610 do \
12611 { \
12612 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12613 else \
12614 { \
12615 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12616 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12617 return IEMOP_RAISE_INVALID_OPCODE(); \
12618 } \
12619 } while (0)
12620#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12621
12622/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12623 * 64-bit mode. */
12624#define IEMOP_HLP_NO_64BIT() \
12625 do \
12626 { \
12627 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12628 return IEMOP_RAISE_INVALID_OPCODE(); \
12629 } while (0)
12630
12631/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12632 * 64-bit mode. */
12633#define IEMOP_HLP_ONLY_64BIT() \
12634 do \
12635 { \
12636 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12637 return IEMOP_RAISE_INVALID_OPCODE(); \
12638 } while (0)
12639
12640/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12641#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12642 do \
12643 { \
12644 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12645 iemRecalEffOpSize64Default(pVCpu); \
12646 } while (0)
12647
12648/** The instruction has 64-bit operand size if 64-bit mode. */
12649#define IEMOP_HLP_64BIT_OP_SIZE() \
12650 do \
12651 { \
12652 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12653 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12654 } while (0)
12655
12656/** Only a REX prefix immediately preceding the first opcode byte takes
12657 * effect. This macro helps ensure this as well as logging bad guest code. */
12658#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12659 do \
12660 { \
12661 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12662 { \
12663 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12664 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12665 pVCpu->iem.s.uRexB = 0; \
12666 pVCpu->iem.s.uRexIndex = 0; \
12667 pVCpu->iem.s.uRexReg = 0; \
12668 iemRecalEffOpSize(pVCpu); \
12669 } \
12670 } while (0)
12671
12672/**
12673 * Done decoding.
12674 */
12675#define IEMOP_HLP_DONE_DECODING() \
12676 do \
12677 { \
12678 /*nothing for now, maybe later... */ \
12679 } while (0)
12680
12681/**
12682 * Done decoding, raise \#UD exception if lock prefix present.
12683 */
12684#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12685 do \
12686 { \
12687 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12688 { /* likely */ } \
12689 else \
12690 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12691 } while (0)
12692
12693
12694/**
12695 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12696 * repnz or size prefixes are present, or if in real or v8086 mode.
12697 */
12698#define IEMOP_HLP_DONE_VEX_DECODING() \
12699 do \
12700 { \
12701 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12702 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12703 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12704 { /* likely */ } \
12705 else \
12706 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12707 } while (0)
12708
12709/**
12710 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12711 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L != 0.
12712 */
12713#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12714 do \
12715 { \
12716 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12717 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12718 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12719 && pVCpu->iem.s.uVexLength == 0)) \
12720 { /* likely */ } \
12721 else \
12722 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12723 } while (0)
12724
12725
12726/**
12727 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12728 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12729 * register 0, or if in real or v8086 mode.
12730 */
12731#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12732 do \
12733 { \
12734 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12735 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12736 && !pVCpu->iem.s.uVex3rdReg \
12737 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12738 { /* likely */ } \
12739 else \
12740 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12741 } while (0)
12742
12743/**
12744 * Done decoding VEX, no V, L=0.
12745 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12746 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12747 */
12748#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12749 do \
12750 { \
12751 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12752 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12753 && pVCpu->iem.s.uVexLength == 0 \
12754 && pVCpu->iem.s.uVex3rdReg == 0 \
12755 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12756 { /* likely */ } \
12757 else \
12758 return IEMOP_RAISE_INVALID_OPCODE(); \
12759 } while (0)
12760
12761#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12762 do \
12763 { \
12764 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12765 { /* likely */ } \
12766 else \
12767 { \
12768 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12769 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12770 } \
12771 } while (0)
12772#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12773 do \
12774 { \
12775 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12776 { /* likely */ } \
12777 else \
12778 { \
12779 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12780 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12781 } \
12782 } while (0)
12783
12784/**
12785 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12786 * are present.
12787 */
12788#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12789 do \
12790 { \
12791 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12792 { /* likely */ } \
12793 else \
12794 return IEMOP_RAISE_INVALID_OPCODE(); \
12795 } while (0)
12796
12797/**
12798 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12799 * prefixes are present.
12800 */
12801#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12802 do \
12803 { \
12804 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12805 { /* likely */ } \
12806 else \
12807 return IEMOP_RAISE_INVALID_OPCODE(); \
12808 } while (0)
12809
12810
12811/**
12812 * Calculates the effective address of a ModR/M memory operand.
12813 *
12814 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12815 *
12816 * @return Strict VBox status code.
12817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12818 * @param bRm The ModRM byte.
12819 * @param cbImm The size of any immediate following the
12820 * effective address opcode bytes. Important for
12821 * RIP relative addressing.
12822 * @param pGCPtrEff Where to return the effective address.
12823 */
12824IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12825{
12826 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12827# define SET_SS_DEF() \
12828 do \
12829 { \
12830 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12831 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12832 } while (0)
12833
12834 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12835 {
12836/** @todo Check the effective address size crap! */
12837 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12838 {
12839 uint16_t u16EffAddr;
12840
12841 /* Handle the disp16 form with no registers first. */
12842 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12843 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12844 else
12845 {
12846 /* Get the displacement. */
12847 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12848 {
12849 case 0: u16EffAddr = 0; break;
12850 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12851 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12852 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12853 }
12854
12855 /* Add the base and index registers to the disp. */
12856 switch (bRm & X86_MODRM_RM_MASK)
12857 {
12858 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12859 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12860 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12861 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12862 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12863 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12864 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12865 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12866 }
12867 }
12868
12869 *pGCPtrEff = u16EffAddr;
12870 }
12871 else
12872 {
12873 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12874 uint32_t u32EffAddr;
12875
12876 /* Handle the disp32 form with no registers first. */
12877 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12878 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12879 else
12880 {
12881 /* Get the register (or SIB) value. */
12882 switch ((bRm & X86_MODRM_RM_MASK))
12883 {
12884 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12885 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12886 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12887 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12888 case 4: /* SIB */
12889 {
12890 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12891
12892 /* Get the index and scale it. */
12893 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12894 {
12895 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12896 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12897 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12898 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12899 case 4: u32EffAddr = 0; /*none */ break;
12900 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12901 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12902 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12903 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12904 }
12905 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12906
12907 /* add base */
12908 switch (bSib & X86_SIB_BASE_MASK)
12909 {
12910 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12911 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12912 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12913 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12914 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12915 case 5:
12916 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12917 {
12918 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12919 SET_SS_DEF();
12920 }
12921 else
12922 {
12923 uint32_t u32Disp;
12924 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12925 u32EffAddr += u32Disp;
12926 }
12927 break;
12928 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12929 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12930 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12931 }
12932 break;
12933 }
12934 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12935 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12936 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12937 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12938 }
12939
12940 /* Get and add the displacement. */
12941 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12942 {
12943 case 0:
12944 break;
12945 case 1:
12946 {
12947 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12948 u32EffAddr += i8Disp;
12949 break;
12950 }
12951 case 2:
12952 {
12953 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12954 u32EffAddr += u32Disp;
12955 break;
12956 }
12957 default:
12958 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12959 }
12960
12961 }
12962 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12963 *pGCPtrEff = u32EffAddr;
12964 else
12965 {
12966 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12967 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12968 }
12969 }
12970 }
12971 else
12972 {
12973 uint64_t u64EffAddr;
12974
12975 /* Handle the rip+disp32 form with no registers first. */
12976 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12977 {
12978 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12979 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12980 }
12981 else
12982 {
12983 /* Get the register (or SIB) value. */
12984 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12985 {
12986 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12987 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12988 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12989 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12990 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
12991 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12992 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12993 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12994 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12995 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12996 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12997 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12998 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12999 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13000 /* SIB */
13001 case 4:
13002 case 12:
13003 {
13004 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13005
13006 /* Get the index and scale it. */
13007 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13008 {
13009 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13010 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13011 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13012 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13013 case 4: u64EffAddr = 0; /*none */ break;
13014 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13015 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13016 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13017 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13018 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13019 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13020 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13021 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13022 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13023 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13024 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13025 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13026 }
13027 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13028
13029 /* add base */
13030 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13031 {
13032 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13033 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13034 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13035 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13036 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13037 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13038 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13039 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13040 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13041 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13042 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13043 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13044 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13045 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13046 /* complicated encodings */
13047 case 5:
13048 case 13:
13049 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13050 {
13051 if (!pVCpu->iem.s.uRexB)
13052 {
13053 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13054 SET_SS_DEF();
13055 }
13056 else
13057 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13058 }
13059 else
13060 {
13061 uint32_t u32Disp;
13062 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13063 u64EffAddr += (int32_t)u32Disp;
13064 }
13065 break;
13066 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13067 }
13068 break;
13069 }
13070 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13071 }
13072
13073 /* Get and add the displacement. */
13074 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13075 {
13076 case 0:
13077 break;
13078 case 1:
13079 {
13080 int8_t i8Disp;
13081 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13082 u64EffAddr += i8Disp;
13083 break;
13084 }
13085 case 2:
13086 {
13087 uint32_t u32Disp;
13088 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13089 u64EffAddr += (int32_t)u32Disp;
13090 break;
13091 }
13092 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13093 }
13094
13095 }
13096
13097 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13098 *pGCPtrEff = u64EffAddr;
13099 else
13100 {
13101 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13102 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13103 }
13104 }
13105
13106 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13107 return VINF_SUCCESS;
13108}
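/* Worked example (added note, values illustrative): with 32-bit addressing,
 * bRm = 0x44 gives mod=1 (disp8 follows) and rm=4 (SIB byte follows).  A SIB
 * byte of 0x88 selects scale=4, index=ecx and base=eax, so the routine
 * returns eax + ecx*4 + (int8_t)disp8, with DS remaining the default segment
 * since neither EBP nor ESP is involved.
 */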
13109
13110
13111/**
13112 * Calculates the effective address of a ModR/M memory operand.
13113 *
13114 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13115 *
13116 * @return Strict VBox status code.
13117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13118 * @param bRm The ModRM byte.
13119 * @param cbImm The size of any immediate following the
13120 * effective address opcode bytes. Important for
13121 * RIP relative addressing.
13122 * @param pGCPtrEff Where to return the effective address.
13123 * @param offRsp RSP displacement.
13124 */
13125IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13126{
13127 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
13128# define SET_SS_DEF() \
13129 do \
13130 { \
13131 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13132 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13133 } while (0)
13134
13135 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13136 {
13137/** @todo Check the effective address size crap! */
13138 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13139 {
13140 uint16_t u16EffAddr;
13141
13142 /* Handle the disp16 form with no registers first. */
13143 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13144 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13145 else
13146 {
13147 /* Get the displacement. */
13148 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13149 {
13150 case 0: u16EffAddr = 0; break;
13151 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13152 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13153 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13154 }
13155
13156 /* Add the base and index registers to the disp. */
13157 switch (bRm & X86_MODRM_RM_MASK)
13158 {
13159 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13160 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13161 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13162 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13163 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13164 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13165 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13166 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13167 }
13168 }
13169
13170 *pGCPtrEff = u16EffAddr;
13171 }
13172 else
13173 {
13174 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13175 uint32_t u32EffAddr;
13176
13177 /* Handle the disp32 form with no registers first. */
13178 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13179 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13180 else
13181 {
13182 /* Get the register (or SIB) value. */
13183 switch ((bRm & X86_MODRM_RM_MASK))
13184 {
13185 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13186 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13187 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13188 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13189 case 4: /* SIB */
13190 {
13191 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13192
13193 /* Get the index and scale it. */
13194 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13195 {
13196 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13197 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13198 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13199 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13200 case 4: u32EffAddr = 0; /*none */ break;
13201 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13202 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13203 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13204 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13205 }
13206 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13207
13208 /* add base */
13209 switch (bSib & X86_SIB_BASE_MASK)
13210 {
13211 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13212 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13213 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13214 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13215 case 4:
13216 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13217 SET_SS_DEF();
13218 break;
13219 case 5:
13220 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13221 {
13222 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13223 SET_SS_DEF();
13224 }
13225 else
13226 {
13227 uint32_t u32Disp;
13228 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13229 u32EffAddr += u32Disp;
13230 }
13231 break;
13232 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13233 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13234 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13235 }
13236 break;
13237 }
13238 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13239 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13240 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13241 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13242 }
13243
13244 /* Get and add the displacement. */
13245 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13246 {
13247 case 0:
13248 break;
13249 case 1:
13250 {
13251 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13252 u32EffAddr += i8Disp;
13253 break;
13254 }
13255 case 2:
13256 {
13257 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13258 u32EffAddr += u32Disp;
13259 break;
13260 }
13261 default:
13262 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13263 }
13264
13265 }
13266 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13267 *pGCPtrEff = u32EffAddr;
13268 else
13269 {
13270 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13271 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13272 }
13273 }
13274 }
13275 else
13276 {
13277 uint64_t u64EffAddr;
13278
13279 /* Handle the rip+disp32 form with no registers first. */
13280 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13281 {
13282 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13283 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13284 }
13285 else
13286 {
13287 /* Get the register (or SIB) value. */
13288 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13289 {
13290 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13291 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13292 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13293 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13294 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13295 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13296 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13297 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13298 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13299 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13300 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13301 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13302 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13303 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13304 /* SIB */
13305 case 4:
13306 case 12:
13307 {
13308 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13309
13310 /* Get the index and scale it. */
13311 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13312 {
13313 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13314 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13315 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13316 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13317 case 4: u64EffAddr = 0; /*none */ break;
13318 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13319 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13320 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13321 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13322 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13323 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13324 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13325 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13326 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13327 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13328 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13329 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13330 }
13331 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13332
13333 /* add base */
13334 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13335 {
13336 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13337 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13338 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13339 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13340 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13341 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13342 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13343 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13344 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13345 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13346 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13347 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13348 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13349 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13350 /* complicated encodings */
13351 case 5:
13352 case 13:
13353 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13354 {
13355 if (!pVCpu->iem.s.uRexB)
13356 {
13357 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13358 SET_SS_DEF();
13359 }
13360 else
13361 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13362 }
13363 else
13364 {
13365 uint32_t u32Disp;
13366 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13367 u64EffAddr += (int32_t)u32Disp;
13368 }
13369 break;
13370 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13371 }
13372 break;
13373 }
13374 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13375 }
13376
13377 /* Get and add the displacement. */
13378 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13379 {
13380 case 0:
13381 break;
13382 case 1:
13383 {
13384 int8_t i8Disp;
13385 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13386 u64EffAddr += i8Disp;
13387 break;
13388 }
13389 case 2:
13390 {
13391 uint32_t u32Disp;
13392 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13393 u64EffAddr += (int32_t)u32Disp;
13394 break;
13395 }
13396 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13397 }
13398
13399 }
13400
13401 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13402 *pGCPtrEff = u64EffAddr;
13403 else
13404 {
13405 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13406 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13407 }
13408 }
13409
13410 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13411 return VINF_SUCCESS;
13412}
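
/*
 * Worked example of the SIB arithmetic above (the byte values and register
 * choices are illustrative only, not taken from any particular guest): for
 * bRm=0x44 (mod=1, rm=4 -> SIB follows, disp8), bSib=0xd8 (scale=3, index=RBX,
 * base=RAX) and disp8=0x10, the code computes
 *      GCPtrEff = RAX + (RBX << 3) + 0x10
 * with DS remaining the default segment since the base is neither RSP nor RBP.
 */
#if 0 /* hypothetical standalone sketch of the same calculation */
static uint64_t iemExampleSibEffAddr(uint64_t uBase, uint64_t uIndex, uint8_t bSib, int8_t i8Disp)
{
    /* Scale the index by the two SIB scale bits, then add the base and the
       displacement, mirroring the switch cascade in iemOpHlpCalcRmEffAddrEx. */
    uint64_t uEffAddr = uIndex << ((bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK);
    uEffAddr += uBase;
    uEffAddr += (uint64_t)(int64_t)i8Disp;
    return uEffAddr;
}
#endif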
13413
13414
13415#ifdef IEM_WITH_SETJMP
13416/**
13417 * Calculates the effective address of a ModR/M memory operand.
13418 *
13419 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13420 *
13421 * May longjmp on internal error.
13422 *
13423 * @return The effective address.
13424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13425 * @param bRm The ModRM byte.
13426 * @param cbImm The size of any immediate following the
13427 * effective address opcode bytes. Important for
13428 * RIP relative addressing.
13429 */
13430IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13431{
13432 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13433# define SET_SS_DEF() \
13434 do \
13435 { \
13436 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13437 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13438 } while (0)
13439
13440 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13441 {
13442/** @todo Check the effective address size crap! */
13443 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13444 {
13445 uint16_t u16EffAddr;
13446
13447 /* Handle the disp16 form with no registers first. */
13448 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13449 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13450 else
13451 {
13452 /* Get the displacement. */
13453 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13454 {
13455 case 0: u16EffAddr = 0; break;
13456 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13457 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13458 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13459 }
13460
13461 /* Add the base and index registers to the disp. */
13462 switch (bRm & X86_MODRM_RM_MASK)
13463 {
13464 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13465 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13466 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13467 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13468 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13469 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13470 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13471 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13472 }
13473 }
13474
13475 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13476 return u16EffAddr;
13477 }
13478
13479 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13480 uint32_t u32EffAddr;
13481
13482 /* Handle the disp32 form with no registers first. */
13483 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13484 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13485 else
13486 {
13487 /* Get the register (or SIB) value. */
13488 switch ((bRm & X86_MODRM_RM_MASK))
13489 {
13490 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13491 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13492 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13493 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13494 case 4: /* SIB */
13495 {
13496 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13497
13498 /* Get the index and scale it. */
13499 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13500 {
13501 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13502 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13503 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13504 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13505 case 4: u32EffAddr = 0; /*none */ break;
13506 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13507 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13508 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13509 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13510 }
13511 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13512
13513 /* add base */
13514 switch (bSib & X86_SIB_BASE_MASK)
13515 {
13516 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13517 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13518 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13519 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13520 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13521 case 5:
13522 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13523 {
13524 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13525 SET_SS_DEF();
13526 }
13527 else
13528 {
13529 uint32_t u32Disp;
13530 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13531 u32EffAddr += u32Disp;
13532 }
13533 break;
13534 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13535 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13536 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13537 }
13538 break;
13539 }
13540 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13541 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13542 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13543 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13544 }
13545
13546 /* Get and add the displacement. */
13547 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13548 {
13549 case 0:
13550 break;
13551 case 1:
13552 {
13553 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13554 u32EffAddr += i8Disp;
13555 break;
13556 }
13557 case 2:
13558 {
13559 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13560 u32EffAddr += u32Disp;
13561 break;
13562 }
13563 default:
13564 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13565 }
13566 }
13567
13568 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13569 {
13570 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13571 return u32EffAddr;
13572 }
13573 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13574 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13575 return u32EffAddr & UINT16_MAX;
13576 }
13577
13578 uint64_t u64EffAddr;
13579
13580 /* Handle the rip+disp32 form with no registers first. */
13581 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13582 {
13583 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13584 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13585 }
13586 else
13587 {
13588 /* Get the register (or SIB) value. */
13589 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13590 {
13591 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13592 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13593 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13594 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13595 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13596 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13597 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13598 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13599 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13600 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13601 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13602 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13603 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13604 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13605 /* SIB */
13606 case 4:
13607 case 12:
13608 {
13609 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13610
13611 /* Get the index and scale it. */
13612 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13613 {
13614 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13615 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13616 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13617 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13618 case 4: u64EffAddr = 0; /*none */ break;
13619 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13620 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13621 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13622 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13623 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13624 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13625 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13626 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13627 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13628 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13629 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13630 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13631 }
13632 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13633
13634 /* add base */
13635 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13636 {
13637 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13638 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13639 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13640 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13641 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13642 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13643 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13644 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13645 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13646 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13647 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13648 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13649 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13650 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13651 /* complicated encodings */
13652 case 5:
13653 case 13:
13654 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13655 {
13656 if (!pVCpu->iem.s.uRexB)
13657 {
13658 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13659 SET_SS_DEF();
13660 }
13661 else
13662 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13663 }
13664 else
13665 {
13666 uint32_t u32Disp;
13667 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13668 u64EffAddr += (int32_t)u32Disp;
13669 }
13670 break;
13671 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13672 }
13673 break;
13674 }
13675 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13676 }
13677
13678 /* Get and add the displacement. */
13679 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13680 {
13681 case 0:
13682 break;
13683 case 1:
13684 {
13685 int8_t i8Disp;
13686 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13687 u64EffAddr += i8Disp;
13688 break;
13689 }
13690 case 2:
13691 {
13692 uint32_t u32Disp;
13693 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13694 u64EffAddr += (int32_t)u32Disp;
13695 break;
13696 }
13697 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13698 }
13699
13700 }
13701
13702 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13703 {
13704 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13705 return u64EffAddr;
13706 }
13707 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13708 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13709 return u64EffAddr & UINT32_MAX;
13710}
13711#endif /* IEM_WITH_SETJMP */
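
/*
 * Note on the two helpers above: the status-code variant reports impossible
 * decoder states with VERR_IEM_IPE_* returns, whereas the setjmp variant
 * either longjmps on the displacement default cases or returns RTGCPTR_MAX
 * from the not-reached register/SIB defaults, so callers built with
 * IEM_WITH_SETJMP never see a strict status from the address calculation.
 */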
13712
13713/** @} */
13714
13715
13716
13717/*
13718 * Include the instructions
13719 */
13720#include "IEMAllInstructions.cpp.h"
13721
13722
13723
13724#ifdef LOG_ENABLED
13725/**
13726 * Logs the current instruction.
13727 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13728 * @param fSameCtx Set if we have the same context information as the VMM,
13729 * clear if we may have already executed an instruction in
13730 * our debug context. When clear, we assume IEMCPU holds
13731 * valid CPU mode info.
13732 *
13733 * The @a fSameCtx parameter is now misleading and obsolete.
13734 * @param pszFunction The IEM function doing the execution.
13735 */
13736IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13737{
13738# ifdef IN_RING3
13739 if (LogIs2Enabled())
13740 {
13741 char szInstr[256];
13742 uint32_t cbInstr = 0;
13743 if (fSameCtx)
13744 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13745 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13746 szInstr, sizeof(szInstr), &cbInstr);
13747 else
13748 {
13749 uint32_t fFlags = 0;
13750 switch (pVCpu->iem.s.enmCpuMode)
13751 {
13752 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13753 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13754 case IEMMODE_16BIT:
13755 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13756 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13757 else
13758 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13759 break;
13760 }
13761 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13762 szInstr, sizeof(szInstr), &cbInstr);
13763 }
13764
13765 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13766 Log2(("**** %s\n"
13767 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13768 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13769 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13770 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13771 " %s\n"
13772 , pszFunction,
13773 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13774 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13775 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13776 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13777 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13778 szInstr));
13779
13780 if (LogIs3Enabled())
13781 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13782 }
13783 else
13784# endif
13785 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13786 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13787 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13788}
13789#endif /* LOG_ENABLED */
13790
13791
13792/**
13793 * Makes status code adjustments (pass up from I/O and access handlers)
13794 * as well as maintaining statistics.
13795 *
13796 * @returns Strict VBox status code to pass up.
13797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13798 * @param rcStrict The status from executing an instruction.
13799 */
13800DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13801{
13802 if (rcStrict != VINF_SUCCESS)
13803 {
13804 if (RT_SUCCESS(rcStrict))
13805 {
13806 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13807 || rcStrict == VINF_IOM_R3_IOPORT_READ
13808 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13809 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13810 || rcStrict == VINF_IOM_R3_MMIO_READ
13811 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13812 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13813 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13814 || rcStrict == VINF_CPUM_R3_MSR_READ
13815 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13816 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13817 || rcStrict == VINF_EM_RAW_TO_R3
13818 || rcStrict == VINF_EM_TRIPLE_FAULT
13819 || rcStrict == VINF_GIM_R3_HYPERCALL
13820 /* raw-mode / virt handlers only: */
13821 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13822 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13823 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13824 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13825 || rcStrict == VINF_SELM_SYNC_GDT
13826 || rcStrict == VINF_CSAM_PENDING_ACTION
13827 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13828 /* nested hw.virt codes: */
13829 || rcStrict == VINF_SVM_VMEXIT
13830 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13831/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13832 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13833#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13834 if ( rcStrict == VINF_SVM_VMEXIT
13835 && rcPassUp == VINF_SUCCESS)
13836 rcStrict = VINF_SUCCESS;
13837 else
13838#endif
13839 if (rcPassUp == VINF_SUCCESS)
13840 pVCpu->iem.s.cRetInfStatuses++;
13841 else if ( rcPassUp < VINF_EM_FIRST
13842 || rcPassUp > VINF_EM_LAST
13843 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13844 {
13845 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13846 pVCpu->iem.s.cRetPassUpStatus++;
13847 rcStrict = rcPassUp;
13848 }
13849 else
13850 {
13851 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13852 pVCpu->iem.s.cRetInfStatuses++;
13853 }
13854 }
13855 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13856 pVCpu->iem.s.cRetAspectNotImplemented++;
13857 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13858 pVCpu->iem.s.cRetInstrNotImplemented++;
13859 else
13860 pVCpu->iem.s.cRetErrStatuses++;
13861 }
13862 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13863 {
13864 pVCpu->iem.s.cRetPassUpStatus++;
13865 rcStrict = pVCpu->iem.s.rcPassUp;
13866 }
13867
13868 return rcStrict;
13869}
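
/*
 * The pass-up priority rule above, distilled (illustrative sketch only, not
 * used anywhere): a pending rcPassUp replaces rcStrict when it is not one of
 * the VINF_EM_* scheduling codes, or when it is numerically lower, i.e. more
 * important, than the informational status at hand.
 */
#if 0
static VBOXSTRICTRC iemExamplePickStatus(int32_t rcPassUp, VBOXSTRICTRC rcStrict)
{
    if (rcPassUp == VINF_SUCCESS)
        return rcStrict;                /* nothing pending, keep the informational status */
    if (   rcPassUp < VINF_EM_FIRST
        || rcPassUp > VINF_EM_LAST
        || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
        return rcPassUp;                /* the pending status takes precedence */
    return rcStrict;                    /* otherwise stick with the stricter rcStrict */
}
#endif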
13870
13871
13872/**
13873 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13874 * IEMExecOneWithPrefetchedByPC.
13875 *
13876 * Similar code is found in IEMExecLots.
13877 *
13878 * @return Strict VBox status code.
13879 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13880 * @param fExecuteInhibit If set, execute the instruction following CLI,
13881 * POP SS and MOV SS, Gr.
13882 * @param pszFunction The calling function name.
13883 */
13884DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13885{
13886 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13887 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13888 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13889 RT_NOREF_PV(pszFunction);
13890
13891#ifdef IEM_WITH_SETJMP
13892 VBOXSTRICTRC rcStrict;
13893 jmp_buf JmpBuf;
13894 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13895 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13896 if ((rcStrict = setjmp(JmpBuf)) == 0)
13897 {
13898 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13899 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13900 }
13901 else
13902 pVCpu->iem.s.cLongJumps++;
13903 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13904#else
13905 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13906 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13907#endif
13908 if (rcStrict == VINF_SUCCESS)
13909 pVCpu->iem.s.cInstructions++;
13910 if (pVCpu->iem.s.cActiveMappings > 0)
13911 {
13912 Assert(rcStrict != VINF_SUCCESS);
13913 iemMemRollback(pVCpu);
13914 }
13915 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13916 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13917 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13918
13919//#ifdef DEBUG
13920// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13921//#endif
13922
13923 /* Execute the next instruction as well if a cli, pop ss or
13924 mov ss, Gr has just completed successfully. */
13925 if ( fExecuteInhibit
13926 && rcStrict == VINF_SUCCESS
13927 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13928 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13929 {
13930 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13931 if (rcStrict == VINF_SUCCESS)
13932 {
13933#ifdef LOG_ENABLED
13934 iemLogCurInstr(pVCpu, false, pszFunction);
13935#endif
13936#ifdef IEM_WITH_SETJMP
13937 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13938 if ((rcStrict = setjmp(JmpBuf)) == 0)
13939 {
13940 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13941 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13942 }
13943 else
13944 pVCpu->iem.s.cLongJumps++;
13945 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13946#else
13947 IEM_OPCODE_GET_NEXT_U8(&b);
13948 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13949#endif
13950 if (rcStrict == VINF_SUCCESS)
13951 pVCpu->iem.s.cInstructions++;
13952 if (pVCpu->iem.s.cActiveMappings > 0)
13953 {
13954 Assert(rcStrict != VINF_SUCCESS);
13955 iemMemRollback(pVCpu);
13956 }
13957 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13958 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13959 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13960 }
13961 else if (pVCpu->iem.s.cActiveMappings > 0)
13962 iemMemRollback(pVCpu);
13963 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13964 }
13965
13966 /*
13967 * Return value fiddling, statistics and sanity assertions.
13968 */
13969 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13970
13971 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
13972 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
13973 return rcStrict;
13974}
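
/*
 * Example of the inhibit handling above (guest code purely illustrative): for
 * the sequence
 *      mov  ss, ax
 *      mov  esp, ebx
 * a single call with fExecuteInhibit set executes both instructions, so no
 * interrupt can be delivered between the SS and ESP updates, matching the
 * interrupt shadow that real hardware applies after MOV SS / POP SS.
 */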
13975
13976
13977#ifdef IN_RC
13978/**
13979 * Re-enters raw-mode or ensures we return to ring-3.
13980 *
13981 * @returns rcStrict, maybe modified.
13982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13983 * @param rcStrict The status code returned by the interpreter.
13984 */
13985DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13986{
13987 if ( !pVCpu->iem.s.fInPatchCode
13988 && ( rcStrict == VINF_SUCCESS
13989 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13990 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13991 {
13992 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13993 CPUMRawEnter(pVCpu);
13994 else
13995 {
13996 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13997 rcStrict = VINF_EM_RESCHEDULE;
13998 }
13999 }
14000 return rcStrict;
14001}
14002#endif
14003
14004
14005/**
14006 * Execute one instruction.
14007 *
14008 * @return Strict VBox status code.
14009 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14010 */
14011VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14012{
14013#ifdef LOG_ENABLED
14014 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14015#endif
14016
14017 /*
14018 * Do the decoding and emulation.
14019 */
14020 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14021 if (rcStrict == VINF_SUCCESS)
14022 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14023 else if (pVCpu->iem.s.cActiveMappings > 0)
14024 iemMemRollback(pVCpu);
14025
14026#ifdef IN_RC
14027 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14028#endif
14029 if (rcStrict != VINF_SUCCESS)
14030 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14031 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14032 return rcStrict;
14033}
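
/*
 * Hypothetical caller sketch (for illustration only; this is not how EM
 * actually drives IEM): stepping a vCPU a bounded number of instructions via
 * IEMExecOne and stopping on the first non-VINF_SUCCESS status.
 */
#if 0
static VBOXSTRICTRC iemExampleStepN(PVMCPU pVCpu, uint32_t cInstrMax)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cInstrMax-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);
        if (rcStrict != VINF_SUCCESS)
            break;                      /* hand informational/error statuses back to the caller */
    }
    return rcStrict;
}
#endif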
14034
14035
14036VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14037{
14038 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14039
14040 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14041 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14042 if (rcStrict == VINF_SUCCESS)
14043 {
14044 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14045 if (pcbWritten)
14046 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14047 }
14048 else if (pVCpu->iem.s.cActiveMappings > 0)
14049 iemMemRollback(pVCpu);
14050
14051#ifdef IN_RC
14052 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14053#endif
14054 return rcStrict;
14055}
14056
14057
14058VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14059 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14060{
14061 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14062
14063 VBOXSTRICTRC rcStrict;
14064 if ( cbOpcodeBytes
14065 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14066 {
14067 iemInitDecoder(pVCpu, false);
14068#ifdef IEM_WITH_CODE_TLB
14069 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14070 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14071 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14072 pVCpu->iem.s.offCurInstrStart = 0;
14073 pVCpu->iem.s.offInstrNextByte = 0;
14074#else
14075 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14076 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14077#endif
14078 rcStrict = VINF_SUCCESS;
14079 }
14080 else
14081 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14082 if (rcStrict == VINF_SUCCESS)
14083 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14084 else if (pVCpu->iem.s.cActiveMappings > 0)
14085 iemMemRollback(pVCpu);
14086
14087#ifdef IN_RC
14088 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14089#endif
14090 return rcStrict;
14091}
14092
14093
14094VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14095{
14096 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14097
14098 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14099 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14100 if (rcStrict == VINF_SUCCESS)
14101 {
14102 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14103 if (pcbWritten)
14104 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14105 }
14106 else if (pVCpu->iem.s.cActiveMappings > 0)
14107 iemMemRollback(pVCpu);
14108
14109#ifdef IN_RC
14110 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14111#endif
14112 return rcStrict;
14113}
14114
14115
14116VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14117 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14118{
14119 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14120
14121 VBOXSTRICTRC rcStrict;
14122 if ( cbOpcodeBytes
14123 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14124 {
14125 iemInitDecoder(pVCpu, true);
14126#ifdef IEM_WITH_CODE_TLB
14127 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14128 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14129 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14130 pVCpu->iem.s.offCurInstrStart = 0;
14131 pVCpu->iem.s.offInstrNextByte = 0;
14132#else
14133 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14134 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14135#endif
14136 rcStrict = VINF_SUCCESS;
14137 }
14138 else
14139 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14140 if (rcStrict == VINF_SUCCESS)
14141 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14142 else if (pVCpu->iem.s.cActiveMappings > 0)
14143 iemMemRollback(pVCpu);
14144
14145#ifdef IN_RC
14146 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14147#endif
14148 return rcStrict;
14149}
14150
14151
14152/**
14153 * For debugging DISGetParamSize; may come in handy.
14154 *
14155 * @returns Strict VBox status code.
14156 * @param pVCpu The cross context virtual CPU structure of the
14157 * calling EMT.
14158 * @param pCtxCore The context core structure.
14159 * @param OpcodeBytesPC The PC of the opcode bytes.
14160 * @param pvOpcodeBytes Prefetched opcode bytes.
14161 * @param cbOpcodeBytes Number of prefetched bytes.
14162 * @param pcbWritten Where to return the number of bytes written.
14163 * Optional.
14164 */
14165VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14166 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14167 uint32_t *pcbWritten)
14168{
14169 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14170
14171 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14172 VBOXSTRICTRC rcStrict;
14173 if ( cbOpcodeBytes
14174 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14175 {
14176 iemInitDecoder(pVCpu, true);
14177#ifdef IEM_WITH_CODE_TLB
14178 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14179 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14180 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14181 pVCpu->iem.s.offCurInstrStart = 0;
14182 pVCpu->iem.s.offInstrNextByte = 0;
14183#else
14184 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14185 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14186#endif
14187 rcStrict = VINF_SUCCESS;
14188 }
14189 else
14190 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14191 if (rcStrict == VINF_SUCCESS)
14192 {
14193 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14194 if (pcbWritten)
14195 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14196 }
14197 else if (pVCpu->iem.s.cActiveMappings > 0)
14198 iemMemRollback(pVCpu);
14199
14200#ifdef IN_RC
14201 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14202#endif
14203 return rcStrict;
14204}
14205
14206
14207VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14208{
14209 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14210
14211 /*
14212 * See if there is an interrupt pending in TRPM, inject it if we can.
14213 */
14214 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14215#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14216 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14217 if (fIntrEnabled)
14218 {
14219 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14220 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14221 else
14222 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14223 }
14224#else
14225 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14226#endif
14227 if ( fIntrEnabled
14228 && TRPMHasTrap(pVCpu)
14229 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14230 {
14231 uint8_t u8TrapNo;
14232 TRPMEVENT enmType;
14233 RTGCUINT uErrCode;
14234 RTGCPTR uCr2;
14235 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14236 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14237 TRPMResetTrap(pVCpu);
14238 }
14239
14240 /*
14241 * Initial decoder init w/ prefetch, then setup setjmp.
14242 */
14243 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14244 if (rcStrict == VINF_SUCCESS)
14245 {
14246#ifdef IEM_WITH_SETJMP
14247 jmp_buf JmpBuf;
14248 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14249 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14250 pVCpu->iem.s.cActiveMappings = 0;
14251 if ((rcStrict = setjmp(JmpBuf)) == 0)
14252#endif
14253 {
14254 /*
14255 * The run loop. We limit ourselves to 4096 instructions right now.
14256 */
14257 PVM pVM = pVCpu->CTX_SUFF(pVM);
14258 uint32_t cInstr = 4096;
14259 for (;;)
14260 {
14261 /*
14262 * Log the state.
14263 */
14264#ifdef LOG_ENABLED
14265 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14266#endif
14267
14268 /*
14269 * Do the decoding and emulation.
14270 */
14271 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14272 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14273 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14274 {
14275 Assert(pVCpu->iem.s.cActiveMappings == 0);
14276 pVCpu->iem.s.cInstructions++;
14277 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14278 {
14279 uint32_t fCpu = pVCpu->fLocalForcedActions
14280 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14281 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14282 | VMCPU_FF_TLB_FLUSH
14283#ifdef VBOX_WITH_RAW_MODE
14284 | VMCPU_FF_TRPM_SYNC_IDT
14285 | VMCPU_FF_SELM_SYNC_TSS
14286 | VMCPU_FF_SELM_SYNC_GDT
14287 | VMCPU_FF_SELM_SYNC_LDT
14288#endif
14289 | VMCPU_FF_INHIBIT_INTERRUPTS
14290 | VMCPU_FF_BLOCK_NMIS
14291 | VMCPU_FF_UNHALT ));
14292
14293 if (RT_LIKELY( ( !fCpu
14294 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14295 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14296 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14297 {
14298 if (cInstr-- > 0)
14299 {
14300 Assert(pVCpu->iem.s.cActiveMappings == 0);
14301 iemReInitDecoder(pVCpu);
14302 continue;
14303 }
14304 }
14305 }
14306 Assert(pVCpu->iem.s.cActiveMappings == 0);
14307 }
14308 else if (pVCpu->iem.s.cActiveMappings > 0)
14309 iemMemRollback(pVCpu);
14310 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14311 break;
14312 }
14313 }
14314#ifdef IEM_WITH_SETJMP
14315 else
14316 {
14317 if (pVCpu->iem.s.cActiveMappings > 0)
14318 iemMemRollback(pVCpu);
14319 pVCpu->iem.s.cLongJumps++;
14320 }
14321 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14322#endif
14323
14324 /*
14325 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14326 */
14327 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14328 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14329 }
14330 else
14331 {
14332 if (pVCpu->iem.s.cActiveMappings > 0)
14333 iemMemRollback(pVCpu);
14334
14335#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14336 /*
14337 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14338 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14339 */
14340 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14341#endif
14342 }
14343
14344 /*
14345 * Maybe re-enter raw-mode and log.
14346 */
14347#ifdef IN_RC
14348 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14349#endif
14350 if (rcStrict != VINF_SUCCESS)
14351 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14352 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14353 if (pcInstructions)
14354 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14355 return rcStrict;
14356}
14357
14358
14359/**
14360 * Interface used by EMExecuteExec, does exit statistics and limits.
14361 *
14362 * @returns Strict VBox status code.
14363 * @param pVCpu The cross context virtual CPU structure.
14364 * @param fWillExit To be defined.
14365 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14366 * @param cMaxInstructions Maximum number of instructions to execute.
14367 * @param cMaxInstructionsWithoutExits
14368 * The max number of instructions without exits.
14369 * @param pStats Where to return statistics.
14370 */
14371VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14372 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14373{
14374 NOREF(fWillExit); /** @todo define flexible exit crits */
14375
14376 /*
14377 * Initialize return stats.
14378 */
14379 pStats->cInstructions = 0;
14380 pStats->cExits = 0;
14381 pStats->cMaxExitDistance = 0;
14382 pStats->cReserved = 0;
14383
14384 /*
14385 * Initial decoder init w/ prefetch, then setup setjmp.
14386 */
14387 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14388 if (rcStrict == VINF_SUCCESS)
14389 {
14390#ifdef IEM_WITH_SETJMP
14391 jmp_buf JmpBuf;
14392 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14393 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14394 pVCpu->iem.s.cActiveMappings = 0;
14395 if ((rcStrict = setjmp(JmpBuf)) == 0)
14396#endif
14397 {
14398#ifdef IN_RING0
14399 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14400#endif
14401 uint32_t cInstructionSinceLastExit = 0;
14402
14403 /*
14404 * The run loop. We limit ourselves to the instruction counts given by the caller.
14405 */
14406 PVM pVM = pVCpu->CTX_SUFF(pVM);
14407 for (;;)
14408 {
14409 /*
14410 * Log the state.
14411 */
14412#ifdef LOG_ENABLED
14413 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14414#endif
14415
14416 /*
14417 * Do the decoding and emulation.
14418 */
14419 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14420
14421 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14422 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14423
14424 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14425 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14426 {
14427 pStats->cExits += 1;
14428 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14429 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14430 cInstructionSinceLastExit = 0;
14431 }
14432
14433 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14434 {
14435 Assert(pVCpu->iem.s.cActiveMappings == 0);
14436 pVCpu->iem.s.cInstructions++;
14437 pStats->cInstructions++;
14438 cInstructionSinceLastExit++;
14439 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14440 {
14441 uint32_t fCpu = pVCpu->fLocalForcedActions
14442 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14443 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14444 | VMCPU_FF_TLB_FLUSH
14445#ifdef VBOX_WITH_RAW_MODE
14446 | VMCPU_FF_TRPM_SYNC_IDT
14447 | VMCPU_FF_SELM_SYNC_TSS
14448 | VMCPU_FF_SELM_SYNC_GDT
14449 | VMCPU_FF_SELM_SYNC_LDT
14450#endif
14451 | VMCPU_FF_INHIBIT_INTERRUPTS
14452 | VMCPU_FF_BLOCK_NMIS
14453 | VMCPU_FF_UNHALT ));
14454
14455 if (RT_LIKELY( ( ( !fCpu
14456 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14457 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14458 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) )
14459 || pStats->cInstructions < cMinInstructions))
14460 {
14461 if (pStats->cInstructions < cMaxInstructions)
14462 {
14463 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14464 {
14465#ifdef IN_RING0
14466 if ( !fCheckPreemptionPending
14467 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14468#endif
14469 {
14470 Assert(pVCpu->iem.s.cActiveMappings == 0);
14471 iemReInitDecoder(pVCpu);
14472 continue;
14473 }
14474#ifdef IN_RING0
14475 rcStrict = VINF_EM_RAW_INTERRUPT;
14476 break;
14477#endif
14478 }
14479 }
14480 }
14481 Assert(!(fCpu & VMCPU_FF_IEM));
14482 }
14483 Assert(pVCpu->iem.s.cActiveMappings == 0);
14484 }
14485 else if (pVCpu->iem.s.cActiveMappings > 0)
14486 iemMemRollback(pVCpu);
14487 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14488 break;
14489 }
14490 }
14491#ifdef IEM_WITH_SETJMP
14492 else
14493 {
14494 if (pVCpu->iem.s.cActiveMappings > 0)
14495 iemMemRollback(pVCpu);
14496 pVCpu->iem.s.cLongJumps++;
14497 }
14498 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14499#endif
14500
14501 /*
14502 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14503 */
14504 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14505 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14506 }
14507 else
14508 {
14509 if (pVCpu->iem.s.cActiveMappings > 0)
14510 iemMemRollback(pVCpu);
14511
14512#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14513 /*
14514 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14515 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14516 */
14517 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14518#endif
14519 }
14520
14521 /*
14522 * Maybe re-enter raw-mode and log.
14523 */
14524#ifdef IN_RC
14525 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14526#endif
14527 if (rcStrict != VINF_SUCCESS)
14528 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14529 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14530 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14531 return rcStrict;
14532}
14533
14534
14535/**
14536 * Injects a trap, fault, abort, software interrupt or external interrupt.
14537 *
14538 * The parameter list matches TRPMQueryTrapAll pretty closely.
14539 *
14540 * @returns Strict VBox status code.
14541 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14542 * @param u8TrapNo The trap number.
14543 * @param enmType What type is it (trap/fault/abort), software
14544 * interrupt or hardware interrupt.
14545 * @param uErrCode The error code if applicable.
14546 * @param uCr2 The CR2 value if applicable.
14547 * @param cbInstr The instruction length (only relevant for
14548 * software interrupts).
14549 */
14550VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14551 uint8_t cbInstr)
14552{
14553 iemInitDecoder(pVCpu, false);
14554#ifdef DBGFTRACE_ENABLED
14555 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14556 u8TrapNo, enmType, uErrCode, uCr2);
14557#endif
14558
14559 uint32_t fFlags;
14560 switch (enmType)
14561 {
14562 case TRPM_HARDWARE_INT:
14563 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14564 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14565 uErrCode = uCr2 = 0;
14566 break;
14567
14568 case TRPM_SOFTWARE_INT:
14569 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14570 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14571 uErrCode = uCr2 = 0;
14572 break;
14573
14574 case TRPM_TRAP:
14575 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14576 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14577 if (u8TrapNo == X86_XCPT_PF)
14578 fFlags |= IEM_XCPT_FLAGS_CR2;
14579 switch (u8TrapNo)
14580 {
14581 case X86_XCPT_DF:
14582 case X86_XCPT_TS:
14583 case X86_XCPT_NP:
14584 case X86_XCPT_SS:
14585 case X86_XCPT_PF:
14586 case X86_XCPT_AC:
14587 fFlags |= IEM_XCPT_FLAGS_ERR;
14588 break;
14589
14590 case X86_XCPT_NMI:
14591 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14592 break;
14593 }
14594 break;
14595
14596 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14597 }
14598
14599 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14600
14601 if (pVCpu->iem.s.cActiveMappings > 0)
14602 iemMemRollback(pVCpu);
14603
14604 return rcStrict;
14605}
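
/*
 * Illustrative use of IEMInjectTrap (hypothetical helper, not part of the
 * sources): injecting a #PF with an error code and fault address the way a
 * TRPM-style caller might; X86_TRAP_PF_RW marks the access as a write.
 */
#if 0
static VBOXSTRICTRC iemExampleInjectPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, X86_TRAP_PF_RW, GCPtrFault, 0 /*cbInstr*/);
}
#endif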
14606
14607
14608/**
14609 * Injects the active TRPM event.
14610 *
14611 * @returns Strict VBox status code.
14612 * @param pVCpu The cross context virtual CPU structure.
14613 */
14614VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14615{
14616#ifndef IEM_IMPLEMENTS_TASKSWITCH
14617 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14618#else
14619 uint8_t u8TrapNo;
14620 TRPMEVENT enmType;
14621 RTGCUINT uErrCode;
14622 RTGCUINTPTR uCr2;
14623 uint8_t cbInstr;
14624 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14625 if (RT_FAILURE(rc))
14626 return rc;
14627
14628 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14629# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14630 if (rcStrict == VINF_SVM_VMEXIT)
14631 rcStrict = VINF_SUCCESS;
14632# endif
14633
14634 /** @todo Are there any other codes that imply the event was successfully
14635 * delivered to the guest? See @bugref{6607}. */
14636 if ( rcStrict == VINF_SUCCESS
14637 || rcStrict == VINF_IEM_RAISED_XCPT)
14638 TRPMResetTrap(pVCpu);
14639
14640 return rcStrict;
14641#endif
14642}
14643
14644
14645VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14646{
14647 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14648 return VERR_NOT_IMPLEMENTED;
14649}
14650
14651
14652VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14653{
14654 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14655 return VERR_NOT_IMPLEMENTED;
14656}
14657
14658
14659#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14660/**
14661 * Executes an IRET instruction with the default operand size.
14662 *
14663 * This is for PATM.
14664 *
14665 * @returns VBox status code.
14666 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14667 * @param pCtxCore The register frame.
14668 */
14669VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14670{
14671 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14672
14673 iemCtxCoreToCtx(pCtx, pCtxCore);
14674 iemInitDecoder(pVCpu);
14675 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14676 if (rcStrict == VINF_SUCCESS)
14677 iemCtxToCtxCore(pCtxCore, pCtx);
14678 else
14679 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14680 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14681 return rcStrict;
14682}
14683#endif
14684
14685
14686/**
14687 * Macro used by the IEMExec* methods to check the given instruction length.
14688 *
14689 * Will return on failure!
14690 *
14691 * @param a_cbInstr The given instruction length.
14692 * @param a_cbMin The minimum length.
14693 */
14694#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14695 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14696 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14697
14698
14699/**
14700 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14701 *
14702 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14703 *
14704 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
14705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14706 * @param rcStrict The status code to fiddle.
14707 */
14708DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14709{
14710 iemUninitExec(pVCpu);
14711#ifdef IN_RC
14712 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14713#else
14714 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14715#endif
14716}
14717
14718
14719/**
14720 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14721 *
14722 * This API ASSUMES that the caller has already verified that the guest code is
14723 * allowed to access the I/O port. (The I/O port is in the DX register in the
14724 * guest state.)
14725 *
14726 * @returns Strict VBox status code.
14727 * @param pVCpu The cross context virtual CPU structure.
14728 * @param cbValue The size of the I/O port access (1, 2, or 4).
14729 * @param enmAddrMode The addressing mode.
14730 * @param fRepPrefix Indicates whether a repeat prefix is used
14731 * (doesn't matter which for this instruction).
14732 * @param cbInstr The instruction length in bytes.
14733 * @param iEffSeg The effective segment register.
14734 * @param fIoChecked Whether the access to the I/O port has been
14735 * checked or not. It's typically checked in the
14736 * HM scenario.
14737 */
14738VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14739 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14740{
14741 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14742 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14743
14744 /*
14745 * State init.
14746 */
14747 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14748
14749 /*
14750 * Switch orgy for getting to the right handler.
14751 */
14752 VBOXSTRICTRC rcStrict;
14753 if (fRepPrefix)
14754 {
14755 switch (enmAddrMode)
14756 {
14757 case IEMMODE_16BIT:
14758 switch (cbValue)
14759 {
14760 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14761 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14762 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14763 default:
14764 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14765 }
14766 break;
14767
14768 case IEMMODE_32BIT:
14769 switch (cbValue)
14770 {
14771 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14772 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14773 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14774 default:
14775 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14776 }
14777 break;
14778
14779 case IEMMODE_64BIT:
14780 switch (cbValue)
14781 {
14782 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14783 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14784 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14785 default:
14786 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14787 }
14788 break;
14789
14790 default:
14791 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14792 }
14793 }
14794 else
14795 {
14796 switch (enmAddrMode)
14797 {
14798 case IEMMODE_16BIT:
14799 switch (cbValue)
14800 {
14801 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14802 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14803 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14804 default:
14805 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14806 }
14807 break;
14808
14809 case IEMMODE_32BIT:
14810 switch (cbValue)
14811 {
14812 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14813 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14814 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14815 default:
14816 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14817 }
14818 break;
14819
14820 case IEMMODE_64BIT:
14821 switch (cbValue)
14822 {
14823 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14824 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14825 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14826 default:
14827 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14828 }
14829 break;
14830
14831 default:
14832 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14833 }
14834 }
14835
14836 if (pVCpu->iem.s.cActiveMappings)
14837 iemMemRollback(pVCpu);
14838
14839 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14840}
14841
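/*
 * A hedged usage sketch (not from the original source): how a VM-exit handler
 * might forward a REP OUTSB with 16-bit addressing to IEM after it has already
 * validated the I/O port access itself.  The wrapper name is hypothetical; the
 * arguments map directly to the parameters documented above.
 */
#if 0 /* Illustrative sketch only. */
static VBOXSTRICTRC exampleForwardRepOutsb(PVMCPU pVCpu, uint8_t cbExitInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue: byte*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                cbExitInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif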
14842
14843/**
14844 * Interface for HM and EM for executing string I/O IN (read) instructions.
14845 *
14846 * This API ASSUMES that the caller has already verified that the guest code is
14847 * allowed to access the I/O port. (The I/O port is in the DX register in the
14848 * guest state.)
14849 *
14850 * @returns Strict VBox status code.
14851 * @param pVCpu The cross context virtual CPU structure.
14852 * @param cbValue The size of the I/O port access (1, 2, or 4).
14853 * @param enmAddrMode The addressing mode.
14854 * @param fRepPrefix Indicates whether a repeat prefix is used
14855 * (doesn't matter which for this instruction).
14856 * @param cbInstr The instruction length in bytes.
14857 * @param fIoChecked Whether the access to the I/O port has been
14858 * checked or not. It's typically checked in the
14859 * HM scenario.
14860 */
14861VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14862 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14863{
14864 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14865
14866 /*
14867 * State init.
14868 */
14869 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14870
14871 /*
14872 * Switch orgy for getting to the right handler.
14873 */
14874 VBOXSTRICTRC rcStrict;
14875 if (fRepPrefix)
14876 {
14877 switch (enmAddrMode)
14878 {
14879 case IEMMODE_16BIT:
14880 switch (cbValue)
14881 {
14882 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14883 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14884 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14885 default:
14886 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14887 }
14888 break;
14889
14890 case IEMMODE_32BIT:
14891 switch (cbValue)
14892 {
14893 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14894 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14895 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14896 default:
14897 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14898 }
14899 break;
14900
14901 case IEMMODE_64BIT:
14902 switch (cbValue)
14903 {
14904 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14905 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14906 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14907 default:
14908 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14909 }
14910 break;
14911
14912 default:
14913 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14914 }
14915 }
14916 else
14917 {
14918 switch (enmAddrMode)
14919 {
14920 case IEMMODE_16BIT:
14921 switch (cbValue)
14922 {
14923 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14924 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14925 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14926 default:
14927 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14928 }
14929 break;
14930
14931 case IEMMODE_32BIT:
14932 switch (cbValue)
14933 {
14934 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14935 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14936 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14937 default:
14938 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14939 }
14940 break;
14941
14942 case IEMMODE_64BIT:
14943 switch (cbValue)
14944 {
14945 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14946 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14947 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14948 default:
14949 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14950 }
14951 break;
14952
14953 default:
14954 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14955 }
14956 }
14957
14958 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
14959 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14960}
14961
14962
14963/**
14964 * Interface for rawmode to execute an OUT instruction.
14965 *
14966 * @returns Strict VBox status code.
14967 * @param pVCpu The cross context virtual CPU structure.
14968 * @param cbInstr The instruction length in bytes.
14969 * @param u16Port The port to write to.
14970 * @param fImm Whether the port is specified using an immediate operand or
14971 * using the implicit DX register.
14972 * @param cbReg The register size.
14973 *
14974 * @remarks In ring-0 not all of the state needs to be synced in.
14975 */
14976VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
14977{
14978 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14979 Assert(cbReg <= 4 && cbReg != 3);
14980
14981 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14982 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
14983 Assert(!pVCpu->iem.s.cActiveMappings);
14984 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14985}
14986
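/*
 * A hedged usage sketch (not from the original source): forwarding a decoded
 * "OUT DX, AL" to the helper above.  The wrapper name is hypothetical; fImm is
 * false because the port comes from DX rather than an immediate operand.
 */
#if 0 /* Illustrative sketch only. */
static VBOXSTRICTRC exampleForwardOutDxAl(PVMCPU pVCpu, uint8_t cbExitInstr, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, cbExitInstr, u16Port, false /*fImm*/, 1 /*cbReg: AL*/);
}
#endif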
14987
14988/**
14989 * Interface for rawmode to execute an IN instruction.
14990 *
14991 * @returns Strict VBox status code.
14992 * @param pVCpu The cross context virtual CPU structure.
14993 * @param cbInstr The instruction length in bytes.
14994 * @param u16Port The port to read.
14995 * @param fImm Whether the port is specified using an immediate operand or
14996 * using the implicit DX register.
14997 * @param cbReg The register size.
14998 */
14999VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15000{
15001 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15002 Assert(cbReg <= 4 && cbReg != 3);
15003
15004 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15005 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15006 Assert(!pVCpu->iem.s.cActiveMappings);
15007 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15008}
15009
15010
15011/**
15012 * Interface for HM and EM to write to a CRx register.
15013 *
15014 * @returns Strict VBox status code.
15015 * @param pVCpu The cross context virtual CPU structure.
15016 * @param cbInstr The instruction length in bytes.
15017 * @param iCrReg The control register number (destination).
15018 * @param iGReg The general purpose register number (source).
15019 *
15020 * @remarks In ring-0 not all of the state needs to be synced in.
15021 */
15022VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15023{
15024 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15025 Assert(iCrReg < 16);
15026 Assert(iGReg < 16);
15027
15028 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15029 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15030 Assert(!pVCpu->iem.s.cActiveMappings);
15031 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15032}
15033
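/*
 * A hedged usage sketch (not from the original source): emulating a decoded
 * "MOV CR3, RAX" via the interface above.  The wrapper name is hypothetical;
 * X86_GREG_xAX is the general purpose register index for RAX.
 */
#if 0 /* Illustrative sketch only. */
static VBOXSTRICTRC exampleForwardMovCr3FromRax(PVMCPU pVCpu, uint8_t cbExitInstr)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbExitInstr, 3 /*iCrReg*/, X86_GREG_xAX);
}
#endif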
15034
15035/**
15036 * Interface for HM and EM to read from a CRx register.
15037 *
15038 * @returns Strict VBox status code.
15039 * @param pVCpu The cross context virtual CPU structure.
15040 * @param cbInstr The instruction length in bytes.
15041 * @param iGReg The general purpose register number (destination).
15042 * @param iCrReg The control register number (source).
15043 *
15044 * @remarks In ring-0 not all of the state needs to be synced in.
15045 */
15046VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15047{
15048 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15049 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15050 | CPUMCTX_EXTRN_APIC_TPR);
15051 Assert(iCrReg < 16);
15052 Assert(iGReg < 16);
15053
15054 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15055 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15056 Assert(!pVCpu->iem.s.cActiveMappings);
15057 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15058}
15059
15060
15061/**
15062 * Interface for HM and EM to clear the CR0[TS] bit.
15063 *
15064 * @returns Strict VBox status code.
15065 * @param pVCpu The cross context virtual CPU structure.
15066 * @param cbInstr The instruction length in bytes.
15067 *
15068 * @remarks In ring-0 not all of the state needs to be synced in.
15069 */
15070VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15071{
15072 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15073
15074 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15075 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15076 Assert(!pVCpu->iem.s.cActiveMappings);
15077 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15078}
15079
15080
15081/**
15082 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15083 *
15084 * @returns Strict VBox status code.
15085 * @param pVCpu The cross context virtual CPU structure.
15086 * @param cbInstr The instruction length in bytes.
15087 * @param uValue The value to load into CR0.
15088 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15089 * memory operand. Otherwise pass NIL_RTGCPTR.
15090 *
15091 * @remarks In ring-0 not all of the state needs to be synced in.
15092 */
15093VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15094{
15095 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15096
15097 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15098 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15099 Assert(!pVCpu->iem.s.cActiveMappings);
15100 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15101}
15102
15103
15104/**
15105 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15106 *
15107 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15108 *
15109 * @returns Strict VBox status code.
15110 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15111 * @param cbInstr The instruction length in bytes.
15112 * @remarks In ring-0 not all of the state needs to be synced in.
15113 * @thread EMT(pVCpu)
15114 */
15115VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15116{
15117 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15118
15119 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15120 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15121 Assert(!pVCpu->iem.s.cActiveMappings);
15122 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15123}
15124
15125
15126/**
15127 * Interface for HM and EM to emulate the WBINVD instruction.
15128 *
15129 * @returns Strict VBox status code.
15130 * @param pVCpu The cross context virtual CPU structure.
15131 * @param cbInstr The instruction length in bytes.
15132 *
15133 * @remarks In ring-0 not all of the state needs to be synced in.
15134 */
15135VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15136{
15137 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15138
15139 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15140 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15141 Assert(!pVCpu->iem.s.cActiveMappings);
15142 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15143}
15144
15145
15146/**
15147 * Interface for HM and EM to emulate the INVD instruction.
15148 *
15149 * @returns Strict VBox status code.
15150 * @param pVCpu The cross context virtual CPU structure.
15151 * @param cbInstr The instruction length in bytes.
15152 *
15153 * @remarks In ring-0 not all of the state needs to be synced in.
15154 */
15155VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15156{
15157 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15158
15159 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15160 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15161 Assert(!pVCpu->iem.s.cActiveMappings);
15162 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15163}
15164
15165
15166/**
15167 * Interface for HM and EM to emulate the INVLPG instruction.
15168 *
15169 * @returns Strict VBox status code.
15170 * @retval VINF_PGM_SYNC_CR3
15171 *
15172 * @param pVCpu The cross context virtual CPU structure.
15173 * @param cbInstr The instruction length in bytes.
15174 * @param GCPtrPage The effective address of the page to invalidate.
15175 *
15176 * @remarks In ring-0 not all of the state needs to be synced in.
15177 */
15178VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15179{
15180 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15181
15182 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15183 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15184 Assert(!pVCpu->iem.s.cActiveMappings);
15185 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15186}
15187
15188
15189/**
15190 * Interface for HM and EM to emulate the CPUID instruction.
15191 *
15192 * @returns Strict VBox status code.
15193 *
15194 * @param pVCpu The cross context virtual CPU structure.
15195 * @param cbInstr The instruction length in bytes.
15196 *
15197 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15198 */
15199VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15200{
15201 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15202 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15203
15204 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15205 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15206 Assert(!pVCpu->iem.s.cActiveMappings);
15207 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15208}
15209
15210
15211/**
15212 * Interface for HM and EM to emulate the RDPMC instruction.
15213 *
15214 * @returns Strict VBox status code.
15215 *
15216 * @param pVCpu The cross context virtual CPU structure.
15217 * @param cbInstr The instruction length in bytes.
15218 *
15219 * @remarks Not all of the state needs to be synced in.
15220 */
15221VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15222{
15223 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15224 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15225
15226 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15227 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15228 Assert(!pVCpu->iem.s.cActiveMappings);
15229 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15230}
15231
15232
15233/**
15234 * Interface for HM and EM to emulate the RDTSC instruction.
15235 *
15236 * @returns Strict VBox status code.
15237 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15238 *
15239 * @param pVCpu The cross context virtual CPU structure.
15240 * @param cbInstr The instruction length in bytes.
15241 *
15242 * @remarks Not all of the state needs to be synced in.
15243 */
15244VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15245{
15246 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15247 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15248
15249 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15250 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15251 Assert(!pVCpu->iem.s.cActiveMappings);
15252 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15253}
15254
15255
15256/**
15257 * Interface for HM and EM to emulate the RDTSCP instruction.
15258 *
15259 * @returns Strict VBox status code.
15260 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15261 *
15262 * @param pVCpu The cross context virtual CPU structure.
15263 * @param cbInstr The instruction length in bytes.
15264 *
15265 * @remarks Not all of the state needs to be synced in. Recommended to
15266 * include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15267 */
15268VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15269{
15270 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15271 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15272
15273 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15274 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15275 Assert(!pVCpu->iem.s.cActiveMappings);
15276 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15277}
15278
15279
15280/**
15281 * Interface for HM and EM to emulate the RDMSR instruction.
15282 *
15283 * @returns Strict VBox status code.
15284 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15285 *
15286 * @param pVCpu The cross context virtual CPU structure.
15287 * @param cbInstr The instruction length in bytes.
15288 *
15289 * @remarks Not all of the state needs to be synced in. Requires RCX and
15290 * (currently) all MSRs.
15291 */
15292VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15293{
15294 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15295 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15296
15297 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15298 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15299 Assert(!pVCpu->iem.s.cActiveMappings);
15300 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15301}
15302
15303
15304/**
15305 * Interface for HM and EM to emulate the WRMSR instruction.
15306 *
15307 * @returns Strict VBox status code.
15308 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15309 *
15310 * @param pVCpu The cross context virtual CPU structure.
15311 * @param cbInstr The instruction length in bytes.
15312 *
15313 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15314 * and (currently) all MSRs.
15315 */
15316VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15317{
15318 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15319 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15320 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15321
15322 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15323 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15324 Assert(!pVCpu->iem.s.cActiveMappings);
15325 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15326}
15327
15328
15329/**
15330 * Interface for HM and EM to emulate the MONITOR instruction.
15331 *
15332 * @returns Strict VBox status code.
15333 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15334 *
15335 * @param pVCpu The cross context virtual CPU structure.
15336 * @param cbInstr The instruction length in bytes.
15337 *
15338 * @remarks Not all of the state needs to be synced in.
15339 * @remarks ASSUMES the default DS segment and that no segment override
15340 * prefixes are used.
15341 */
15342VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15343{
15344 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15345 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15346
15347 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15348 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15349 Assert(!pVCpu->iem.s.cActiveMappings);
15350 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15351}
15352
15353
15354/**
15355 * Interface for HM and EM to emulate the MWAIT instruction.
15356 *
15357 * @returns Strict VBox status code.
15358 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15359 *
15360 * @param pVCpu The cross context virtual CPU structure.
15361 * @param cbInstr The instruction length in bytes.
15362 *
15363 * @remarks Not all of the state needs to be synced in.
15364 */
15365VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15366{
15367 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15368
15369 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15370 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15371 Assert(!pVCpu->iem.s.cActiveMappings);
15372 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15373}
15374
15375
15376/**
15377 * Interface for HM and EM to emulate the HLT instruction.
15378 *
15379 * @returns Strict VBox status code.
15380 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15381 *
15382 * @param pVCpu The cross context virtual CPU structure.
15383 * @param cbInstr The instruction length in bytes.
15384 *
15385 * @remarks Not all of the state needs to be synced in.
15386 */
15387VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15388{
15389 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15390
15391 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15392 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15393 Assert(!pVCpu->iem.s.cActiveMappings);
15394 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15395}
15396
15397
15398/**
15399 * Checks if IEM is in the process of delivering an event (interrupt or
15400 * exception).
15401 *
15402 * @returns true if we're in the process of raising an interrupt or exception,
15403 * false otherwise.
15404 * @param pVCpu The cross context virtual CPU structure.
15405 * @param puVector Where to store the vector associated with the
15406 * currently delivered event, optional.
15407 * @param pfFlags Where to store the event delivery flags (see
15408 * IEM_XCPT_FLAGS_XXX), optional.
15409 * @param puErr Where to store the error code associated with the
15410 * event, optional.
15411 * @param puCr2 Where to store the CR2 associated with the event,
15412 * optional.
15413 * @remarks The caller should check the flags to determine if the error code and
15414 * CR2 are valid for the event.
15415 */
15416VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15417{
15418 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15419 if (fRaisingXcpt)
15420 {
15421 if (puVector)
15422 *puVector = pVCpu->iem.s.uCurXcpt;
15423 if (pfFlags)
15424 *pfFlags = pVCpu->iem.s.fCurXcpt;
15425 if (puErr)
15426 *puErr = pVCpu->iem.s.uCurXcptErr;
15427 if (puCr2)
15428 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15429 }
15430 return fRaisingXcpt;
15431}
15432
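/*
 * A hedged usage sketch (not from the original source): querying whether IEM is
 * in the middle of delivering an event and logging the vector.  The function
 * name is hypothetical; the NULL arguments simply skip the optional outputs.
 */
#if 0 /* Illustrative sketch only. */
static void exampleLogPendingXcpt(PVMCPU pVCpu)
{
    uint8_t uVector;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, NULL /*pfFlags*/, NULL /*puErr*/, NULL /*puCr2*/))
        Log(("Currently delivering vector %#x\n", uVector));
}
#endif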
15433#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15434
15435/**
15436 * Interface for HM and EM to emulate the CLGI instruction.
15437 *
15438 * @returns Strict VBox status code.
15439 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15440 * @param cbInstr The instruction length in bytes.
15441 * @thread EMT(pVCpu)
15442 */
15443VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15444{
15445 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15446
15447 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15448 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15449 Assert(!pVCpu->iem.s.cActiveMappings);
15450 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15451}
15452
15453
15454/**
15455 * Interface for HM and EM to emulate the STGI instruction.
15456 *
15457 * @returns Strict VBox status code.
15458 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15459 * @param cbInstr The instruction length in bytes.
15460 * @thread EMT(pVCpu)
15461 */
15462VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15463{
15464 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15465
15466 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15467 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15468 Assert(!pVCpu->iem.s.cActiveMappings);
15469 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15470}
15471
15472
15473/**
15474 * Interface for HM and EM to emulate the VMLOAD instruction.
15475 *
15476 * @returns Strict VBox status code.
15477 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15478 * @param cbInstr The instruction length in bytes.
15479 * @thread EMT(pVCpu)
15480 */
15481VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15482{
15483 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15484
15485 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15486 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15487 Assert(!pVCpu->iem.s.cActiveMappings);
15488 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15489}
15490
15491
15492/**
15493 * Interface for HM and EM to emulate the VMSAVE instruction.
15494 *
15495 * @returns Strict VBox status code.
15496 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15497 * @param cbInstr The instruction length in bytes.
15498 * @thread EMT(pVCpu)
15499 */
15500VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15501{
15502 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15503
15504 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15505 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15506 Assert(!pVCpu->iem.s.cActiveMappings);
15507 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15508}
15509
15510
15511/**
15512 * Interface for HM and EM to emulate the INVLPGA instruction.
15513 *
15514 * @returns Strict VBox status code.
15515 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15516 * @param cbInstr The instruction length in bytes.
15517 * @thread EMT(pVCpu)
15518 */
15519VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15520{
15521 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15522
15523 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15524 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15525 Assert(!pVCpu->iem.s.cActiveMappings);
15526 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15527}
15528
15529
15530/**
15531 * Interface for HM and EM to emulate the VMRUN instruction.
15532 *
15533 * @returns Strict VBox status code.
15534 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15535 * @param cbInstr The instruction length in bytes.
15536 * @thread EMT(pVCpu)
15537 */
15538VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15539{
15540 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15541 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15542
15543 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15544 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15545 Assert(!pVCpu->iem.s.cActiveMappings);
15546 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15547}
15548
15549
15550/**
15551 * Interface for HM and EM to emulate \#VMEXIT.
15552 *
15553 * @returns Strict VBox status code.
15554 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15555 * @param uExitCode The exit code.
15556 * @param uExitInfo1 The exit info. 1 field.
15557 * @param uExitInfo2 The exit info. 2 field.
15558 * @thread EMT(pVCpu)
15559 */
15560VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15561{
15562 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15563 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15564 if (pVCpu->iem.s.cActiveMappings)
15565 iemMemRollback(pVCpu);
15566 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15567}
15568
15569#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15570
15571#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15572
15573/**
15574 * Interface for HM and EM to emulate the VMREAD instruction.
15575 *
15576 * @returns Strict VBox status code.
15577 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15578 * @param pExitInfo Pointer to the VM-exit information struct.
15579 * @thread EMT(pVCpu)
15580 */
15581VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15582{
15583 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15584 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15585 Assert(pExitInfo);
15586
15587 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15588
15589 VBOXSTRICTRC rcStrict;
15590 uint8_t const cbInstr = pExitInfo->cbInstr;
15591 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15592 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15593 {
15594 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15595 {
15596 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15597 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15598 }
15599 else
15600 {
15601 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15602 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15603 }
15604 }
15605 else
15606 {
15607 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15608 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15609 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15610 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15611 }
15612 if (pVCpu->iem.s.cActiveMappings)
15613 iemMemRollback(pVCpu);
15614 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15615}
15616
15617
15618/**
15619 * Interface for HM and EM to emulate the VMWRITE instruction.
15620 *
15621 * @returns Strict VBox status code.
15622 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15623 * @param pExitInfo Pointer to the VM-exit information struct.
15624 * @thread EMT(pVCpu)
15625 */
15626VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15627{
15628 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15629 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15630 Assert(pExitInfo);
15631
15632 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15633
15634 uint64_t u64Val;
15635 uint8_t iEffSeg;
15636 IEMMODE enmEffAddrMode;
15637 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15638 {
15639 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15640 iEffSeg = UINT8_MAX;
15641 enmEffAddrMode = UINT8_MAX;
15642 }
15643 else
15644 {
15645 u64Val = pExitInfo->GCPtrEffAddr;
15646 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15647 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15648 }
15649 uint8_t const cbInstr = pExitInfo->cbInstr;
15650 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15651 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15652 if (pVCpu->iem.s.cActiveMappings)
15653 iemMemRollback(pVCpu);
15654 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15655}
15656
15657
15658/**
15659 * Interface for HM and EM to emulate the VMPTRLD instruction.
15660 *
15661 * @returns Strict VBox status code.
15662 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15663 * @param pExitInfo Pointer to the VM-exit information struct.
15664 * @thread EMT(pVCpu)
15665 */
15666VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15667{
15668 Assert(pExitInfo);
15669 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15670 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15671
15672 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15673
15674 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15675 uint8_t const cbInstr = pExitInfo->cbInstr;
15676 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15677 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15678 if (pVCpu->iem.s.cActiveMappings)
15679 iemMemRollback(pVCpu);
15680 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15681}
15682
15683
15684/**
15685 * Interface for HM and EM to emulate the VMPTRST instruction.
15686 *
15687 * @returns Strict VBox status code.
15688 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15689 * @param pExitInfo Pointer to the VM-exit information struct.
15690 * @thread EMT(pVCpu)
15691 */
15692VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15693{
15694 Assert(pExitInfo);
15695 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15696 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15697
15698 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15699
15700 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15701 uint8_t const cbInstr = pExitInfo->cbInstr;
15702 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15703 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15704 if (pVCpu->iem.s.cActiveMappings)
15705 iemMemRollback(pVCpu);
15706 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15707}
15708
15709
15710/**
15711 * Interface for HM and EM to emulate the VMCLEAR instruction.
15712 *
15713 * @returns Strict VBox status code.
15714 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15715 * @param pExitInfo Pointer to the VM-exit information struct.
15716 * @thread EMT(pVCpu)
15717 */
15718VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15719{
15720 Assert(pExitInfo);
15721 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15722 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15723
15724 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15725
15726 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15727 uint8_t const cbInstr = pExitInfo->cbInstr;
15728 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15729 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15730 if (pVCpu->iem.s.cActiveMappings)
15731 iemMemRollback(pVCpu);
15732 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15733}
15734
15735
15736/**
15737 * Interface for HM and EM to emulate the VMXON instruction.
15738 *
15739 * @returns Strict VBox status code.
15740 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15741 * @param pExitInfo Pointer to the VM-exit information struct.
15742 * @thread EMT(pVCpu)
15743 */
15744VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15745{
15746 Assert(pExitInfo);
15747 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15748 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15749
15750 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15751
15752 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15753 uint8_t const cbInstr = pExitInfo->cbInstr;
15754 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
15755 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
15756 if (pVCpu->iem.s.cActiveMappings)
15757 iemMemRollback(pVCpu);
15758 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15759}
15760
15761
15762/**
15763 * Interface for HM and EM to emulate the VMXOFF instruction.
15764 *
15765 * @returns Strict VBox status code.
15766 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15767 * @param cbInstr The instruction length in bytes.
15768 * @thread EMT(pVCpu)
15769 */
15770VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
15771{
15772 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15773
15774 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15775 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
15776 Assert(!pVCpu->iem.s.cActiveMappings);
15777 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15778}
15779
15780#endif
15781
15782#ifdef IN_RING3
15783
15784/**
15785 * Handles the unlikely and probably fatal merge cases.
15786 *
15787 * @returns Merged status code.
15788 * @param rcStrict Current EM status code.
15789 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15790 * with @a rcStrict.
15791 * @param iMemMap The memory mapping index. For error reporting only.
15792 * @param pVCpu The cross context virtual CPU structure of the calling
15793 * thread, for error reporting only.
15794 */
15795DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15796 unsigned iMemMap, PVMCPU pVCpu)
15797{
15798 if (RT_FAILURE_NP(rcStrict))
15799 return rcStrict;
15800
15801 if (RT_FAILURE_NP(rcStrictCommit))
15802 return rcStrictCommit;
15803
15804 if (rcStrict == rcStrictCommit)
15805 return rcStrictCommit;
15806
15807 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15808 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15809 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15810 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15811 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15812 return VERR_IOM_FF_STATUS_IPE;
15813}
15814
15815
15816/**
15817 * Helper for IEMR3ProcessForceFlag.
15818 *
15819 * @returns Merged status code.
15820 * @param rcStrict Current EM status code.
15821 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15822 * with @a rcStrict.
15823 * @param iMemMap The memory mapping index. For error reporting only.
15824 * @param pVCpu The cross context virtual CPU structure of the calling
15825 * thread, for error reporting only.
15826 */
15827DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15828{
15829 /* Simple. */
15830 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15831 return rcStrictCommit;
15832
15833 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15834 return rcStrict;
15835
15836 /* EM scheduling status codes. */
15837 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15838 && rcStrict <= VINF_EM_LAST))
15839 {
15840 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15841 && rcStrictCommit <= VINF_EM_LAST))
15842 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15843 }
15844
15845 /* Unlikely */
15846 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15847}
15848
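/*
 * A hedged worked example (not from the original source) of the merge rule
 * above: when the current status is plain VINF_SUCCESS, the commit status is
 * simply taken over; when both are EM scheduling codes, the numerically lower
 * of the two is kept, which the 'rcStrict < rcStrictCommit' comparison relies
 * on.  The status values below are picked purely for illustration.
 */
#if 0 /* Illustrative sketch only. */
/* rcStrict == VINF_SUCCESS, so whatever the physical write commit returned is passed on. */
VBOXSTRICTRC rcMerged = iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RESCHEDULE, 0 /*iMemMap*/, pVCpu);
#endif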
15849
15850/**
15851 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15852 *
15853 * @returns Merge between @a rcStrict and what the commit operation returned.
15854 * @param pVM The cross context VM structure.
15855 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15856 * @param rcStrict The status code returned by ring-0 or raw-mode.
15857 */
15858VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15859{
15860 /*
15861 * Reset the pending commit.
15862 */
15863 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15864 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15865 ("%#x %#x %#x\n",
15866 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15867 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15868
15869 /*
15870 * Commit the pending bounce buffers (usually just one).
15871 */
15872 unsigned cBufs = 0;
15873 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15874 while (iMemMap-- > 0)
15875 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15876 {
15877 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15878 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15879 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15880
15881 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15882 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15883 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15884
15885 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15886 {
15887 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15888 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15889 pbBuf,
15890 cbFirst,
15891 PGMACCESSORIGIN_IEM);
15892 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15893 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15894 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15895 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15896 }
15897
15898 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15899 {
15900 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15901 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15902 pbBuf + cbFirst,
15903 cbSecond,
15904 PGMACCESSORIGIN_IEM);
15905 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15906 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15907 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15908 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15909 }
15910 cBufs++;
15911 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15912 }
15913
15914 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15915 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15916 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15917 pVCpu->iem.s.cActiveMappings = 0;
15918 return rcStrict;
15919}
15920
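/*
 * A hedged usage sketch (not from the original source): how a ring-3 caller
 * might react to VMCPU_FF_IEM by handing the pending bounce-buffer commits to
 * IEMR3ProcessForceFlag.  The wrapper name is hypothetical.
 */
#if 0 /* Illustrative sketch only. */
static VBOXSTRICTRC exampleProcessPendingIemCommits(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif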
15921#endif /* IN_RING3 */
15922