VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 73389

Last change on this file since 73389 was 73203, checked in by vboxsync, 7 years ago

VMM, Devices: bugref:9193 Remove unused code after using EMRZSetPendingIoPort[Read|Write].

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 614.4 KB
1/* $Id: IEMAll.cpp 73203 2018-07-18 13:00:43Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
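/*
 * A minimal, hypothetical illustration of the log levels listed above using
 * the standard VBox Log* macros (the messages and variables below are made up
 * and do not appear in IEM):
 *
 *      LogFlow(("IEMExecOne: enter\n"));                      - Flow: enter/exit info
 *      Log(("iemRaiseXcptOrInt: vector=%#x\n", u8Vector));    - Level 1: exceptions & such
 *      Log4(("decode - %04x:%08RX64 nop\n", uSel, uRip));     - Level 4: mnemonics w/ EIP
 *      Log8(("IEM WR %RGp LB %#x\n", GCPhys, cb));            - Level 8: memory writes
 */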
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211/**
212 * CPU exception classes.
213 */
214typedef enum IEMXCPTCLASS
215{
216 IEMXCPTCLASS_BENIGN,
217 IEMXCPTCLASS_CONTRIBUTORY,
218 IEMXCPTCLASS_PAGE_FAULT,
219 IEMXCPTCLASS_DOUBLE_FAULT
220} IEMXCPTCLASS;
221
222
223/*********************************************************************************************************************************
224* Defined Constants And Macros *
225*********************************************************************************************************************************/
226/** @def IEM_WITH_SETJMP
227 * Enables alternative status code handling using setjmps.
228 *
229 * This adds a bit of expense via the setjmp() call since it saves all the
230 * non-volatile registers. However, it eliminates return code checks and allows
231 * for more optimal return value passing (return regs instead of stack buffer).
232 */
233#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
234# define IEM_WITH_SETJMP
235#endif
236
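/*
 * A minimal sketch of the setjmp-based status handling described above.  It
 * only shows the general shape: the real IEM code keeps its jump buffer in
 * the per-CPU IEM state and uses its own raise helpers, and the sketch*
 * names below are made up for illustration.
 */
#if 0
# include <setjmp.h>

static jmp_buf g_SketchJmpBuf;

static void sketchRaiseFault(int rcStrict)
{
    longjmp(g_SketchJmpBuf, rcStrict);      /* deliver the status without threading return codes back up */
}

static int sketchExecuteOne(void)
{
    int rcStrict = setjmp(g_SketchJmpBuf);  /* the one-time cost: saving the non-volatile registers */
    if (rcStrict == 0)
    {
        /* ... decode and execute; any fault path calls sketchRaiseFault() ... */
        return 0; /* VINF_SUCCESS */
    }
    return rcStrict;                        /* fault path: status arrived via longjmp */
}
#endif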
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
243
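/*
 * Hypothetical usage sketch for the two default-case helpers above (the
 * function name is made up; the real users are scattered across the IEM
 * sources):
 */
#if 0
static VBOXSTRICTRC sketchPickBySize(IEMMODE enmEffOpSize)
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT: return VINF_SUCCESS;    /* ... 16-bit flavour ... */
        case IEMMODE_32BIT: return VINF_SUCCESS;    /* ... 32-bit flavour ... */
        case IEMMODE_64BIT: return VINF_SUCCESS;    /* ... 64-bit flavour ... */
        IEM_NOT_REACHED_DEFAULT_CASE_RET();         /* keeps GCC from seeing a fall-out path */
    }
}
#endif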
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_1.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
300
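/*
 * Hypothetical sketch of how the FNIEMOP_DEF* and FNIEMOP_CALL* macros fit
 * together (the iemOp_Sketch* names are made up; real decoders additionally
 * use opcode-fetch macros that live in other IEM source files):
 */
#if 0
FNIEMOP_DEF(iemOp_Sketch_Nop)
{
    RT_NOREF(pVCpu);
    return VINF_SUCCESS;                        /* a decoder returns a strict status code */
}

FNIEMOPRM_DEF(iemOp_Sketch_Grp)
{
    RT_NOREF(bRm);
    return FNIEMOP_CALL(iemOp_Sketch_Nop);      /* chain to another decoder; pVCpu is passed implicitly */
}
#endif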
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in real mode.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
335 * @returns PCCPUMFEATURES
336 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
337 */
338#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
339
340/**
341 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
342 * @returns PCCPUMFEATURES
343 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
344 */
345#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
346
347/**
348 * Evaluates to true if we're presenting an Intel CPU to the guest.
349 */
350#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
351
352/**
353 * Evaluates to true if we're presenting an AMD CPU to the guest.
354 */
355#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
356
357/**
358 * Check if the address is canonical.
359 */
360#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
361
362/**
363 * Gets the effective VEX.VVVV value.
364 *
365 * The 4th bit is ignored when not in 64-bit code.
366 * @returns effective V-register value.
367 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
368 */
369#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
370 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
371
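/*
 * Worked example for IEM_GET_EFFECTIVE_VVVV: if VEX.vvvv encodes register 12
 * (0b1100), the macro yields 12 in 64-bit mode, but only 12 & 7 = 4 outside
 * 64-bit mode where the 4th bit is ignored.
 */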
372/** @def IEM_USE_UNALIGNED_DATA_ACCESS
373 * Use unaligned accesses instead of elaborate byte assembly. */
374#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
375# define IEM_USE_UNALIGNED_DATA_ACCESS
376#endif
377
378#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
379/**
380 * Check the common SVM instruction preconditions.
381 */
382# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
383 do { \
384 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
385 { \
386 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
387 return iemRaiseUndefinedOpcode(pVCpu); \
388 } \
389 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
390 { \
391 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
392 return iemRaiseUndefinedOpcode(pVCpu); \
393 } \
394 if (pVCpu->iem.s.uCpl != 0) \
395 { \
396 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
397 return iemRaiseGeneralProtectionFault0(pVCpu); \
398 } \
399 } while (0)
400
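/*
 * Hypothetical sketch of how the common SVM checks are used at the top of an
 * SVM instruction implementation (the function name is made up; the real
 * VMRUN/VMLOAD/VMSAVE/... implementations live elsewhere in IEM):
 */
# if 0
IEM_STATIC VBOXSTRICTRC sketchImplVmload(PVMCPU pVCpu)
{
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload); /* raises #UD / #GP(0) and returns on failure */
    /* ... the actual VMLOAD work would follow here ... */
    return VINF_SUCCESS;
}
# endif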
401/**
402 * Updates the NextRIP (NRI) field in the nested-guest VMCB.
403 */
404# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
405 do { \
406 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
407 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
408 } while (0)
409
410/**
411 * Check if SVM is enabled.
412 */
413# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
414
415/**
416 * Check if an SVM control/instruction intercept is set.
417 */
418# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
419
420/**
421 * Check if an SVM read CRx intercept is set.
422 */
423# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
424
425/**
426 * Check if an SVM write CRx intercept is set.
427 */
428# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
429
430/**
431 * Check if an SVM read DRx intercept is set.
432 */
433# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
434
435/**
436 * Check if an SVM write DRx intercept is set.
437 */
438# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
439
440/**
441 * Check if an SVM exception intercept is set.
442 */
443# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
444
445/**
446 * Get the SVM pause-filter count.
447 */
448# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
449
450/**
451 * Invokes the SVM \#VMEXIT handler for the nested-guest.
452 */
453# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
454 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
455
456/**
457 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
458 * corresponding decode assist information.
459 */
460# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
461 do \
462 { \
463 uint64_t uExitInfo1; \
464 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
465 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
466 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
467 else \
468 uExitInfo1 = 0; \
469 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
470 } while (0)
471
472#else
473# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
474# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
475# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
476# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
477# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
478# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
479# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
480# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
481# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
482# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (0)
483# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
484# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
485
486#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
487
488
489/*********************************************************************************************************************************
490* Global Variables *
491*********************************************************************************************************************************/
492extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
493
494
495/** Function table for the ADD instruction. */
496IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
497{
498 iemAImpl_add_u8, iemAImpl_add_u8_locked,
499 iemAImpl_add_u16, iemAImpl_add_u16_locked,
500 iemAImpl_add_u32, iemAImpl_add_u32_locked,
501 iemAImpl_add_u64, iemAImpl_add_u64_locked
502};
503
504/** Function table for the ADC instruction. */
505IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
506{
507 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
508 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
509 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
510 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
511};
512
513/** Function table for the SUB instruction. */
514IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
515{
516 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
517 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
518 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
519 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
520};
521
522/** Function table for the SBB instruction. */
523IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
524{
525 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
526 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
527 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
528 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
529};
530
531/** Function table for the OR instruction. */
532IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
533{
534 iemAImpl_or_u8, iemAImpl_or_u8_locked,
535 iemAImpl_or_u16, iemAImpl_or_u16_locked,
536 iemAImpl_or_u32, iemAImpl_or_u32_locked,
537 iemAImpl_or_u64, iemAImpl_or_u64_locked
538};
539
540/** Function table for the XOR instruction. */
541IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
542{
543 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
544 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
545 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
546 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
547};
548
549/** Function table for the AND instruction. */
550IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
551{
552 iemAImpl_and_u8, iemAImpl_and_u8_locked,
553 iemAImpl_and_u16, iemAImpl_and_u16_locked,
554 iemAImpl_and_u32, iemAImpl_and_u32_locked,
555 iemAImpl_and_u64, iemAImpl_and_u64_locked
556};
557
558/** Function table for the CMP instruction.
559 * @remarks Making operand order ASSUMPTIONS.
560 */
561IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
562{
563 iemAImpl_cmp_u8, NULL,
564 iemAImpl_cmp_u16, NULL,
565 iemAImpl_cmp_u32, NULL,
566 iemAImpl_cmp_u64, NULL
567};
568
569/** Function table for the TEST instruction.
570 * @remarks Making operand order ASSUMPTIONS.
571 */
572IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
573{
574 iemAImpl_test_u8, NULL,
575 iemAImpl_test_u16, NULL,
576 iemAImpl_test_u32, NULL,
577 iemAImpl_test_u64, NULL
578};
579
580/** Function table for the BT instruction. */
581IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
582{
583 NULL, NULL,
584 iemAImpl_bt_u16, NULL,
585 iemAImpl_bt_u32, NULL,
586 iemAImpl_bt_u64, NULL
587};
588
589/** Function table for the BTC instruction. */
590IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
591{
592 NULL, NULL,
593 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
594 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
595 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
596};
597
598/** Function table for the BTR instruction. */
599IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
600{
601 NULL, NULL,
602 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
603 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
604 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
605};
606
607/** Function table for the BTS instruction. */
608IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
609{
610 NULL, NULL,
611 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
612 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
613 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
614};
615
616/** Function table for the BSF instruction. */
617IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
618{
619 NULL, NULL,
620 iemAImpl_bsf_u16, NULL,
621 iemAImpl_bsf_u32, NULL,
622 iemAImpl_bsf_u64, NULL
623};
624
625/** Function table for the BSR instruction. */
626IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
627{
628 NULL, NULL,
629 iemAImpl_bsr_u16, NULL,
630 iemAImpl_bsr_u32, NULL,
631 iemAImpl_bsr_u64, NULL
632};
633
634/** Function table for the IMUL instruction. */
635IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
636{
637 NULL, NULL,
638 iemAImpl_imul_two_u16, NULL,
639 iemAImpl_imul_two_u32, NULL,
640 iemAImpl_imul_two_u64, NULL
641};
642
643/** Group 1 /r lookup table. */
644IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
645{
646 &g_iemAImpl_add,
647 &g_iemAImpl_or,
648 &g_iemAImpl_adc,
649 &g_iemAImpl_sbb,
650 &g_iemAImpl_and,
651 &g_iemAImpl_sub,
652 &g_iemAImpl_xor,
653 &g_iemAImpl_cmp
654};
655
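/*
 * Hypothetical sketch of how the Group 1 table above is indexed by the reg
 * field of the ModR/M byte (the real dispatch happens in the one-byte opcode
 * decoders, not in this file):
 */
#if 0
static PCIEMOPBINSIZES sketchSelectGrp1(uint8_t bRm)
{
    uint8_t const iReg = (bRm >> 3) & 7;    /* /0 ADD, /1 OR, /2 ADC, /3 SBB, /4 AND, /5 SUB, /6 XOR, /7 CMP */
    return g_apIemImplGrp1[iReg];
}
#endif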
656/** Function table for the INC instruction. */
657IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
658{
659 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
660 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
661 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
662 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
663};
664
665/** Function table for the DEC instruction. */
666IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
667{
668 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
669 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
670 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
671 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
672};
673
674/** Function table for the NEG instruction. */
675IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
676{
677 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
678 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
679 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
680 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
681};
682
683/** Function table for the NOT instruction. */
684IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
685{
686 iemAImpl_not_u8, iemAImpl_not_u8_locked,
687 iemAImpl_not_u16, iemAImpl_not_u16_locked,
688 iemAImpl_not_u32, iemAImpl_not_u32_locked,
689 iemAImpl_not_u64, iemAImpl_not_u64_locked
690};
691
692
693/** Function table for the ROL instruction. */
694IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
695{
696 iemAImpl_rol_u8,
697 iemAImpl_rol_u16,
698 iemAImpl_rol_u32,
699 iemAImpl_rol_u64
700};
701
702/** Function table for the ROR instruction. */
703IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
704{
705 iemAImpl_ror_u8,
706 iemAImpl_ror_u16,
707 iemAImpl_ror_u32,
708 iemAImpl_ror_u64
709};
710
711/** Function table for the RCL instruction. */
712IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
713{
714 iemAImpl_rcl_u8,
715 iemAImpl_rcl_u16,
716 iemAImpl_rcl_u32,
717 iemAImpl_rcl_u64
718};
719
720/** Function table for the RCR instruction. */
721IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
722{
723 iemAImpl_rcr_u8,
724 iemAImpl_rcr_u16,
725 iemAImpl_rcr_u32,
726 iemAImpl_rcr_u64
727};
728
729/** Function table for the SHL instruction. */
730IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
731{
732 iemAImpl_shl_u8,
733 iemAImpl_shl_u16,
734 iemAImpl_shl_u32,
735 iemAImpl_shl_u64
736};
737
738/** Function table for the SHR instruction. */
739IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
740{
741 iemAImpl_shr_u8,
742 iemAImpl_shr_u16,
743 iemAImpl_shr_u32,
744 iemAImpl_shr_u64
745};
746
747/** Function table for the SAR instruction. */
748IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
749{
750 iemAImpl_sar_u8,
751 iemAImpl_sar_u16,
752 iemAImpl_sar_u32,
753 iemAImpl_sar_u64
754};
755
756
757/** Function table for the MUL instruction. */
758IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
759{
760 iemAImpl_mul_u8,
761 iemAImpl_mul_u16,
762 iemAImpl_mul_u32,
763 iemAImpl_mul_u64
764};
765
766/** Function table for the IMUL instruction working implicitly on rAX. */
767IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
768{
769 iemAImpl_imul_u8,
770 iemAImpl_imul_u16,
771 iemAImpl_imul_u32,
772 iemAImpl_imul_u64
773};
774
775/** Function table for the DIV instruction. */
776IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
777{
778 iemAImpl_div_u8,
779 iemAImpl_div_u16,
780 iemAImpl_div_u32,
781 iemAImpl_div_u64
782};
783
784/** Function table for the IDIV instruction. */
785IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
786{
787 iemAImpl_idiv_u8,
788 iemAImpl_idiv_u16,
789 iemAImpl_idiv_u32,
790 iemAImpl_idiv_u64
791};
792
793/** Function table for the SHLD instruction */
794IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
795{
796 iemAImpl_shld_u16,
797 iemAImpl_shld_u32,
798 iemAImpl_shld_u64,
799};
800
801/** Function table for the SHRD instruction */
802IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
803{
804 iemAImpl_shrd_u16,
805 iemAImpl_shrd_u32,
806 iemAImpl_shrd_u64,
807};
808
809
810/** Function table for the PUNPCKLBW instruction */
811IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
813/** Function table for the PUNPCKLWD instruction */
813IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
814/** Function table for the PUNPCKLDQ instruction */
815IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
816/** Function table for the PUNPCKLQDQ instruction */
817IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
818
819/** Function table for the PUNPCKHBW instruction */
820IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
822/** Function table for the PUNPCKHWD instruction */
822IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
823/** Function table for the PUNPCKHDQ instruction */
824IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
825/** Function table for the PUNPCKHQDQ instruction */
826IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
827
828/** Function table for the PXOR instruction */
829IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
830/** Function table for the PCMPEQB instruction */
831IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
832/** Function table for the PCMPEQW instruction */
833IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
834/** Function table for the PCMPEQD instruction */
835IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
836
837
838#if defined(IEM_LOG_MEMORY_WRITES)
839/** What IEM just wrote. */
840uint8_t g_abIemWrote[256];
841/** How much IEM just wrote. */
842size_t g_cbIemWrote;
843#endif
844
845
846/*********************************************************************************************************************************
847* Internal Functions *
848*********************************************************************************************************************************/
849IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
850IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
851IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
852IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
853/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
854IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
855IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
856IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
857IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
858IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
859IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
860IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
861IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
862IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
863IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
864IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
865IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
866#ifdef IEM_WITH_SETJMP
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
868DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
869DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
870DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
871DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
872#endif
873
874IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
875IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
876IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
880IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
881IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
882IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
883IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
884IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
885IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
886IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
887IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
888IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
889IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
890IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
891
892#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
893IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
894IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
895#endif
896
897/**
898 * Sets the pass up status.
899 *
900 * @returns VINF_SUCCESS.
901 * @param pVCpu The cross context virtual CPU structure of the
902 * calling thread.
903 * @param rcPassUp The pass up status. Must be informational.
904 * VINF_SUCCESS is not allowed.
905 */
906IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
907{
908 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
909
910 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
911 if (rcOldPassUp == VINF_SUCCESS)
912 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
913 /* If both are EM scheduling codes, use EM priority rules. */
914 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
915 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
916 {
917 if (rcPassUp < rcOldPassUp)
918 {
919 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
920 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
921 }
922 else
923 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
924 }
925 /* Override EM scheduling with specific status code. */
926 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
927 {
928 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
929 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
930 }
931 /* Don't override specific status code, first come first served. */
932 else
933 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
934 return VINF_SUCCESS;
935}
936
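/*
 * In other words: if both the pending and the new status are EM scheduling
 * codes, the numerically lower one wins, following EM's priority ordering;
 * any other specific informational status overrides a pending EM scheduling
 * code; and once such a specific status is stored, later ones are merely
 * logged (first come, first served).
 */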
937
938/**
939 * Calculates the CPU mode.
940 *
941 * This is mainly for updating IEMCPU::enmCpuMode.
942 *
943 * @returns CPU mode.
944 * @param pVCpu The cross context virtual CPU structure of the
945 * calling thread.
946 */
947DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
948{
949 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
950 return IEMMODE_64BIT;
951 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
952 return IEMMODE_32BIT;
953 return IEMMODE_16BIT;
954}
955
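/*
 * The decision above, spelled out: long mode with CS.L=1 gives IEMMODE_64BIT;
 * otherwise CS.D/B=1 gives IEMMODE_32BIT; everything else (real mode, v8086
 * or a 16-bit code segment) gives IEMMODE_16BIT.
 */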
956
957/**
958 * Initializes the execution state.
959 *
960 * @param pVCpu The cross context virtual CPU structure of the
961 * calling thread.
962 * @param fBypassHandlers Whether to bypass access handlers.
963 *
964 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
965 * side-effects in strict builds.
966 */
967DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
968{
969 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
970 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
971
972#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
973 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
974 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
975 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
976 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
977 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
978 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
979 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
981#endif
982
983#ifdef VBOX_WITH_RAW_MODE_NOT_R0
984 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
985#endif
986 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
987 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
988#ifdef VBOX_STRICT
989 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
990 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
991 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
992 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
993 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
994 pVCpu->iem.s.uRexReg = 127;
995 pVCpu->iem.s.uRexB = 127;
996 pVCpu->iem.s.uRexIndex = 127;
997 pVCpu->iem.s.iEffSeg = 127;
998 pVCpu->iem.s.idxPrefix = 127;
999 pVCpu->iem.s.uVex3rdReg = 127;
1000 pVCpu->iem.s.uVexLength = 127;
1001 pVCpu->iem.s.fEvexStuff = 127;
1002 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1003# ifdef IEM_WITH_CODE_TLB
1004 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1005 pVCpu->iem.s.pbInstrBuf = NULL;
1006 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1007 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1008 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1009 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1010# else
1011 pVCpu->iem.s.offOpcode = 127;
1012 pVCpu->iem.s.cbOpcode = 127;
1013# endif
1014#endif
1015
1016 pVCpu->iem.s.cActiveMappings = 0;
1017 pVCpu->iem.s.iNextMapping = 0;
1018 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1019 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1020#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1021 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1022 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1023 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1024 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1025 if (!pVCpu->iem.s.fInPatchCode)
1026 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1027#endif
1028}
1029
1030#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1031/**
1032 * Performs a minimal reinitialization of the execution state.
1033 *
1034 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1035 * 'world-switch' type operations on the CPU. Currently only nested
1036 * hardware-virtualization uses it.
1037 *
1038 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1039 */
1040IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1041{
1042 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1043 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1044
1045 pVCpu->iem.s.uCpl = uCpl;
1046 pVCpu->iem.s.enmCpuMode = enmMode;
1047 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1048 pVCpu->iem.s.enmEffAddrMode = enmMode;
1049 if (enmMode != IEMMODE_64BIT)
1050 {
1051 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1052 pVCpu->iem.s.enmEffOpSize = enmMode;
1053 }
1054 else
1055 {
1056 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1057 pVCpu->iem.s.enmEffOpSize = enmMode;
1058 }
1059 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1060#ifndef IEM_WITH_CODE_TLB
1061 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1062 pVCpu->iem.s.offOpcode = 0;
1063 pVCpu->iem.s.cbOpcode = 0;
1064#endif
1065 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1066}
1067#endif
1068
1069/**
1070 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1071 *
1072 * @param pVCpu The cross context virtual CPU structure of the
1073 * calling thread.
1074 */
1075DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1076{
1077 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1078#ifdef VBOX_STRICT
1079# ifdef IEM_WITH_CODE_TLB
1080 NOREF(pVCpu);
1081# else
1082 pVCpu->iem.s.cbOpcode = 0;
1083# endif
1084#else
1085 NOREF(pVCpu);
1086#endif
1087}
1088
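/*
 * Hypothetical sketch of how callers bracket execution with the two helpers
 * above (the function is made up; the real entry points such as IEMExecOne
 * do considerably more work in between):
 */
#if 0
static VBOXSTRICTRC sketchExecOne(PVMCPU pVCpu)
{
    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    /* ... decode and execute a single instruction here ... */
    iemUninitExec(pVCpu);                   /* undo the strict-build poisoning */
    return rcStrict;
}
#endif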
1089
1090/**
1091 * Initializes the decoder state.
1092 *
1093 * iemReInitDecoder is mostly a copy of this function.
1094 *
1095 * @param pVCpu The cross context virtual CPU structure of the
1096 * calling thread.
1097 * @param fBypassHandlers Whether to bypass access handlers.
1098 */
1099DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1100{
1101 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1102 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1103
1104#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1105 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1106 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1107 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1108 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1109 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1110 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1111 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1112 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1113#endif
1114
1115#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1116 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1117#endif
1118 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1119 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1120 pVCpu->iem.s.enmCpuMode = enmMode;
1121 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1122 pVCpu->iem.s.enmEffAddrMode = enmMode;
1123 if (enmMode != IEMMODE_64BIT)
1124 {
1125 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1126 pVCpu->iem.s.enmEffOpSize = enmMode;
1127 }
1128 else
1129 {
1130 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1131 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1132 }
1133 pVCpu->iem.s.fPrefixes = 0;
1134 pVCpu->iem.s.uRexReg = 0;
1135 pVCpu->iem.s.uRexB = 0;
1136 pVCpu->iem.s.uRexIndex = 0;
1137 pVCpu->iem.s.idxPrefix = 0;
1138 pVCpu->iem.s.uVex3rdReg = 0;
1139 pVCpu->iem.s.uVexLength = 0;
1140 pVCpu->iem.s.fEvexStuff = 0;
1141 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1142#ifdef IEM_WITH_CODE_TLB
1143 pVCpu->iem.s.pbInstrBuf = NULL;
1144 pVCpu->iem.s.offInstrNextByte = 0;
1145 pVCpu->iem.s.offCurInstrStart = 0;
1146# ifdef VBOX_STRICT
1147 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1148 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1149 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1150# endif
1151#else
1152 pVCpu->iem.s.offOpcode = 0;
1153 pVCpu->iem.s.cbOpcode = 0;
1154#endif
1155 pVCpu->iem.s.cActiveMappings = 0;
1156 pVCpu->iem.s.iNextMapping = 0;
1157 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1158 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1159#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1160 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1161 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1162 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1163 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1164 if (!pVCpu->iem.s.fInPatchCode)
1165 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1166#endif
1167
1168#ifdef DBGFTRACE_ENABLED
1169 switch (enmMode)
1170 {
1171 case IEMMODE_64BIT:
1172 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1173 break;
1174 case IEMMODE_32BIT:
1175 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1176 break;
1177 case IEMMODE_16BIT:
1178 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1179 break;
1180 }
1181#endif
1182}
1183
1184
1185/**
1186 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1187 *
1188 * This is mostly a copy of iemInitDecoder.
1189 *
1190 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1191 */
1192DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1193{
1194 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1195
1196#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1197 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1198 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1199 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1200 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1205#endif
1206
1207 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1208 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1209 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1210 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1211 pVCpu->iem.s.enmEffAddrMode = enmMode;
1212 if (enmMode != IEMMODE_64BIT)
1213 {
1214 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1215 pVCpu->iem.s.enmEffOpSize = enmMode;
1216 }
1217 else
1218 {
1219 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1220 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1221 }
1222 pVCpu->iem.s.fPrefixes = 0;
1223 pVCpu->iem.s.uRexReg = 0;
1224 pVCpu->iem.s.uRexB = 0;
1225 pVCpu->iem.s.uRexIndex = 0;
1226 pVCpu->iem.s.idxPrefix = 0;
1227 pVCpu->iem.s.uVex3rdReg = 0;
1228 pVCpu->iem.s.uVexLength = 0;
1229 pVCpu->iem.s.fEvexStuff = 0;
1230 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1231#ifdef IEM_WITH_CODE_TLB
1232 if (pVCpu->iem.s.pbInstrBuf)
1233 {
1234 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1235 - pVCpu->iem.s.uInstrBufPc;
1236 if (off < pVCpu->iem.s.cbInstrBufTotal)
1237 {
1238 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1239 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1240 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1241 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1242 else
1243 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1244 }
1245 else
1246 {
1247 pVCpu->iem.s.pbInstrBuf = NULL;
1248 pVCpu->iem.s.offInstrNextByte = 0;
1249 pVCpu->iem.s.offCurInstrStart = 0;
1250 pVCpu->iem.s.cbInstrBuf = 0;
1251 pVCpu->iem.s.cbInstrBufTotal = 0;
1252 }
1253 }
1254 else
1255 {
1256 pVCpu->iem.s.offInstrNextByte = 0;
1257 pVCpu->iem.s.offCurInstrStart = 0;
1258 pVCpu->iem.s.cbInstrBuf = 0;
1259 pVCpu->iem.s.cbInstrBufTotal = 0;
1260 }
1261#else
1262 pVCpu->iem.s.cbOpcode = 0;
1263 pVCpu->iem.s.offOpcode = 0;
1264#endif
1265 Assert(pVCpu->iem.s.cActiveMappings == 0);
1266 pVCpu->iem.s.iNextMapping = 0;
1267 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1268 Assert(pVCpu->iem.s.fBypassHandlers == false);
1269#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1270 if (!pVCpu->iem.s.fInPatchCode)
1271 { /* likely */ }
1272 else
1273 {
1274 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1275 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1276 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1277 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1278 if (!pVCpu->iem.s.fInPatchCode)
1279 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1280 }
1281#endif
1282
1283#ifdef DBGFTRACE_ENABLED
1284 switch (enmMode)
1285 {
1286 case IEMMODE_64BIT:
1287 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1288 break;
1289 case IEMMODE_32BIT:
1290 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1291 break;
1292 case IEMMODE_16BIT:
1293 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1294 break;
1295 }
1296#endif
1297}
1298
1299
1300
1301/**
1302 * Prefetches opcodes the first time, when starting execution.
1303 *
1304 * @returns Strict VBox status code.
1305 * @param pVCpu The cross context virtual CPU structure of the
1306 * calling thread.
1307 * @param fBypassHandlers Whether to bypass access handlers.
1308 */
1309IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1310{
1311 iemInitDecoder(pVCpu, fBypassHandlers);
1312
1313#ifdef IEM_WITH_CODE_TLB
1314 /** @todo Do ITLB lookup here. */
1315
1316#else /* !IEM_WITH_CODE_TLB */
1317
1318 /*
1319 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1320 *
1321 * First translate CS:rIP to a physical address.
1322 */
1323 uint32_t cbToTryRead;
1324 RTGCPTR GCPtrPC;
1325 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1326 {
1327 cbToTryRead = PAGE_SIZE;
1328 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1329 if (IEM_IS_CANONICAL(GCPtrPC))
1330 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1331 else
1332 return iemRaiseGeneralProtectionFault0(pVCpu);
1333 }
1334 else
1335 {
1336 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1337 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1338 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1339 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1340 else
1341 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1342 if (cbToTryRead) { /* likely */ }
1343 else /* overflowed */
1344 {
1345 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1346 cbToTryRead = UINT32_MAX;
1347 }
1348 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1349 Assert(GCPtrPC <= UINT32_MAX);
1350 }
1351
1352# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1353 /* Allow interpretation of patch manager code blocks since they can for
1354 instance throw #PFs for perfectly good reasons. */
1355 if (pVCpu->iem.s.fInPatchCode)
1356 {
1357 size_t cbRead = 0;
1358 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1359 AssertRCReturn(rc, rc);
1360 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1361 return VINF_SUCCESS;
1362 }
1363# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1364
1365 RTGCPHYS GCPhys;
1366 uint64_t fFlags;
1367 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1368 if (RT_SUCCESS(rc)) { /* probable */ }
1369 else
1370 {
1371 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1372 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1373 }
1374 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1375 else
1376 {
1377 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1378 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1379 }
1380 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1381 else
1382 {
1383 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1384 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1385 }
1386 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1387 /** @todo Check reserved bits and such stuff. PGM is better at doing
1388 * that, so do it when implementing the guest virtual address
1389 * TLB... */
1390
1391 /*
1392 * Read the bytes at this address.
1393 */
1394 PVM pVM = pVCpu->CTX_SUFF(pVM);
1395# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1396 size_t cbActual;
1397 if ( PATMIsEnabled(pVM)
1398 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1399 {
1400 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1401 Assert(cbActual > 0);
1402 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1403 }
1404 else
1405# endif
1406 {
1407 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1408 if (cbToTryRead > cbLeftOnPage)
1409 cbToTryRead = cbLeftOnPage;
1410 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1411 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1412
1413 if (!pVCpu->iem.s.fBypassHandlers)
1414 {
1415 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1416 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1417 { /* likely */ }
1418 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1419 {
1420 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1421 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1422 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1423 }
1424 else
1425 {
1426 Log((RT_SUCCESS(rcStrict)
1427 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1428 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1429 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1430 return rcStrict;
1431 }
1432 }
1433 else
1434 {
1435 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1436 if (RT_SUCCESS(rc))
1437 { /* likely */ }
1438 else
1439 {
1440 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1441 GCPtrPC, GCPhys, cbToTryRead, rc));
1442 return rc;
1443 }
1444 }
1445 pVCpu->iem.s.cbOpcode = cbToTryRead;
1446 }
1447#endif /* !IEM_WITH_CODE_TLB */
1448 return VINF_SUCCESS;
1449}
1450
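/*
 * Worked example for the size clipping above: with GCPtrPC ending in 0xffa
 * the offset into the 4 KiB page is 0xffa, so cbLeftOnPage = 0x1000 - 0xffa
 * = 6; cbToTryRead is clipped first to those 6 bytes and then to
 * sizeof(abOpcode), so the prefetch never crosses the page boundary.
 */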
1451
1452/**
1453 * Invalidates the IEM TLBs.
1454 *
1455 * This is called internally as well as by PGM when moving GC mappings.
1456 *
1458 * @param pVCpu The cross context virtual CPU structure of the calling
1459 * thread.
1460 * @param fVmm Set when PGM calls us with a remapping.
1461 */
1462VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1463{
1464#ifdef IEM_WITH_CODE_TLB
1465 pVCpu->iem.s.cbInstrBufTotal = 0;
1466 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1467 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1468 { /* very likely */ }
1469 else
1470 {
1471 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1472 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1473 while (i-- > 0)
1474 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1475 }
1476#endif
1477
1478#ifdef IEM_WITH_DATA_TLB
1479 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1480 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1481 { /* very likely */ }
1482 else
1483 {
1484 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1485 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1486 while (i-- > 0)
1487 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1488 }
1489#endif
1490 NOREF(pVCpu); NOREF(fVmm);
1491}
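/*
 * For illustration, this is roughly how the revision trick plays out at
 * lookup time (sketch only; the real lookups live in the opcode and data
 * fetch code, and 'Tlb' here stands for either CodeTlb or DataTlb):
 *
 *      uint64_t const uTag  = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.Tlb.uTlbRevision;
 *      PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.Tlb.aEntries[(uint8_t)uTag];
 *      if (pTlbe->uTag == uTag)
 *          ...hit: the entry was filled under the current revision...
 *
 * Bumping uTlbRevision therefore makes every previously stored tag mismatch
 * without touching the array; only when the revision counter wraps to zero
 * do the tags need to be zeroed explicitly (the unlikely branches above).
 */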
1492
1493
1494/**
1495 * Invalidates a page in the TLBs.
1496 *
1497 * @param pVCpu The cross context virtual CPU structure of the calling
1498 * thread.
1499 * @param GCPtr The address of the page to invalidate.
1500 */
1501VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1502{
1503#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1504 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1505 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1506 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1507 uintptr_t idx = (uint8_t)GCPtr;
1508
1509# ifdef IEM_WITH_CODE_TLB
1510 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1511 {
1512 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1513 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1514 pVCpu->iem.s.cbInstrBufTotal = 0;
1515 }
1516# endif
1517
1518# ifdef IEM_WITH_DATA_TLB
1519 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1520 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1521# endif
1522#else
1523 NOREF(pVCpu); NOREF(GCPtr);
1524#endif
1525}
1526
1527
1528/**
1529 * Invalidates the host physical aspects of the IEM TLBs.
1530 *
1531 * This is called internally as well as by PGM when moving GC mappings.
1532 *
1533 * @param pVCpu The cross context virtual CPU structure of the calling
1534 * thread.
1535 */
1536VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1537{
1538#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1539 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1540
1541# ifdef IEM_WITH_CODE_TLB
1542 pVCpu->iem.s.cbInstrBufTotal = 0;
1543# endif
1544 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1545 if (uTlbPhysRev != 0)
1546 {
1547 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1548 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1549 }
1550 else
1551 {
1552 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1553 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1554
1555 unsigned i;
1556# ifdef IEM_WITH_CODE_TLB
1557 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1558 while (i-- > 0)
1559 {
1560 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1561 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1562 }
1563# endif
1564# ifdef IEM_WITH_DATA_TLB
1565 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1566 while (i-- > 0)
1567 {
1568 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1569 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1570 }
1571# endif
1572 }
1573#else
1574 NOREF(pVCpu);
1575#endif
1576}
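/*
 * The physical revision works the same way, except that it is kept in the
 * flag word of each entry.  Roughly (sketch only, see the opcode fetch code
 * for the real check):
 *
 *      if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *          ...the cached pbMappingR3 and PG_NO_* bits are still valid...
 *      else
 *          ...re-query PGM (PGMPhysIemGCPhys2PtrNoLock) and refresh them...
 *
 * So bumping uTlbPhysRev invalidates the cached physical info lazily; the
 * flag bits are only scrubbed explicitly when the counter wraps.
 */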
1577
1578
1579/**
1580 * Invalidates the host physical aspects of the IEM TLBs of all CPUs.
1581 *
1582 * This is called internally as well as by PGM when moving GC mappings.
1583 *
1584 * @param pVM The cross context VM structure.
1585 *
1586 * @remarks Caller holds the PGM lock.
1587 */
1588VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1589{
1590 RT_NOREF_PV(pVM);
1591}
1592
1593#ifdef IEM_WITH_CODE_TLB
1594
1595/**
1596 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1597 * longjmp'ing on failure.
1598 *
1599 * We end up here for a number of reasons:
1600 * - pbInstrBuf isn't yet initialized.
1601 * - Advancing beyond the buffer boundary (e.g. crossing a page).
1602 * - Advancing beyond the CS segment limit.
1603 * - Fetching from a non-mappable page (e.g. MMIO).
1604 *
1605 * @param pVCpu The cross context virtual CPU structure of the
1606 * calling thread.
1607 * @param pvDst Where to return the bytes.
1608 * @param cbDst Number of bytes to read.
1609 *
1610 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1611 */
1612IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1613{
1614#ifdef IN_RING3
1615 for (;;)
1616 {
1617 Assert(cbDst <= 8);
1618 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1619
1620 /*
1621 * We might have a partial buffer match, deal with that first to make the
1622 * rest simpler. This is the first part of the cross page/buffer case.
1623 */
1624 if (pVCpu->iem.s.pbInstrBuf != NULL)
1625 {
1626 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1627 {
1628 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1629 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1630 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1631
1632 cbDst -= cbCopy;
1633 pvDst = (uint8_t *)pvDst + cbCopy;
1634 offBuf += cbCopy;
1635 pVCpu->iem.s.offInstrNextByte += offBuf;
1636 }
1637 }
1638
1639 /*
1640 * Check segment limit, figuring how much we're allowed to access at this point.
1641 *
1642 * We will fault immediately if RIP is past the segment limit / in non-canonical
1643 * territory. If we do continue, there are one or more bytes to read before we
1644 * end up in trouble and we need to do that first before faulting.
1645 */
1646 RTGCPTR GCPtrFirst;
1647 uint32_t cbMaxRead;
1648 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1649 {
1650 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1651 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1652 { /* likely */ }
1653 else
1654 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1655 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1656 }
1657 else
1658 {
1659 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1660 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1661 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1662 { /* likely */ }
1663 else
1664 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1665 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1666 if (cbMaxRead != 0)
1667 { /* likely */ }
1668 else
1669 {
1670 /* Overflowed because address is 0 and limit is max. */
1671 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1672 cbMaxRead = X86_PAGE_SIZE;
1673 }
1674 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1675 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1676 if (cbMaxRead2 < cbMaxRead)
1677 cbMaxRead = cbMaxRead2;
1678 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1679 }
1680
1681 /*
1682 * Get the TLB entry for this piece of code.
1683 */
1684 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1685 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1686 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1687 if (pTlbe->uTag == uTag)
1688 {
1689 /* likely when executing lots of code, otherwise unlikely */
1690# ifdef VBOX_WITH_STATISTICS
1691 pVCpu->iem.s.CodeTlb.cTlbHits++;
1692# endif
1693 }
1694 else
1695 {
1696 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1697# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1698 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1699 {
1700 pTlbe->uTag = uTag;
1701 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1702 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1703 pTlbe->GCPhys = NIL_RTGCPHYS;
1704 pTlbe->pbMappingR3 = NULL;
1705 }
1706 else
1707# endif
1708 {
1709 RTGCPHYS GCPhys;
1710 uint64_t fFlags;
1711 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1712 if (RT_FAILURE(rc))
1713 {
1714 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1715 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1716 }
1717
1718 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1719 pTlbe->uTag = uTag;
1720 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1721 pTlbe->GCPhys = GCPhys;
1722 pTlbe->pbMappingR3 = NULL;
1723 }
1724 }
1725
1726 /*
1727 * Check TLB page table level access flags.
1728 */
1729 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1730 {
1731 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1732 {
1733 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1734 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1735 }
1736 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1737 {
1738 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1739 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1740 }
1741 }
1742
1743# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1744 /*
1745 * Allow interpretation of patch manager code blocks since they can for
1746 * instance throw #PFs for perfectly good reasons.
1747 */
1748 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1749 { /* not unlikely */ }
1750 else
1751 {
1752 /** @todo This could be optimized a little in ring-3 if we liked. */
1753 size_t cbRead = 0;
1754 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1755 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1756 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1757 return;
1758 }
1759# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1760
1761 /*
1762 * Look up the physical page info if necessary.
1763 */
1764 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1765 { /* not necessary */ }
1766 else
1767 {
1768 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1769 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1770 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1771 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1772 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1773 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1774 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1775 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1776 }
1777
1778# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1779 /*
1780 * Try do a direct read using the pbMappingR3 pointer.
1781 */
1782 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1783 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1784 {
1785 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1786 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1787 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1788 {
1789 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1790 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1791 }
1792 else
1793 {
1794 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1795 Assert(cbInstr < cbMaxRead);
1796 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1797 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1798 }
1799 if (cbDst <= cbMaxRead)
1800 {
1801 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1802 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1803 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1804 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1805 return;
1806 }
1807 pVCpu->iem.s.pbInstrBuf = NULL;
1808
1809 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1810 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1811 }
1812 else
1813# endif
1814#if 0
1815 /*
1816 * If there is no special read handling, we can read a bit more and
1817 * put it in the prefetch buffer.
1818 */
1819 if ( cbDst < cbMaxRead
1820 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1821 {
1822 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1823 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1824 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1825 { /* likely */ }
1826 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1827 {
1828 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1829 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1830 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1831 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1832 }
1833 else
1834 {
1835 Log((RT_SUCCESS(rcStrict)
1836 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1837 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1838 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1839 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1840 }
1841 }
1842 /*
1843 * Special read handling, so only read exactly what's needed.
1844 * This is a highly unlikely scenario.
1845 */
1846 else
1847#endif
1848 {
1849 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1850 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1851 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1852 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1853 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1854 { /* likely */ }
1855 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1856 {
1857 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1858 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1859 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1860 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1861 }
1862 else
1863 {
1864 Log((RT_SUCCESS(rcStrict)
1865 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1866 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1867 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1868 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1869 }
1870 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1871 if (cbToRead == cbDst)
1872 return;
1873 }
1874
1875 /*
1876 * More to read, loop.
1877 */
1878 cbDst -= cbMaxRead;
1879 pvDst = (uint8_t *)pvDst + cbMaxRead;
1880 }
1881#else
1882 RT_NOREF(pvDst, cbDst);
1883 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1884#endif
1885}
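/*
 * A quick worked example of the loop above (numbers picked purely for
 * illustration): say a 4 byte immediate is requested (cbDst = 4) but only
 * one byte is left before the page boundary (cbMaxRead = 1).  The first
 * iteration copies that single byte into pvDst and advances the opcode
 * pointers, then falls out the bottom of the loop body with cbDst = 3 and
 * pvDst moved one byte forward.  The second iteration resolves the next
 * page through the TLB (possibly raising #PF or #GP), reads the remaining
 * three bytes and returns from inside the loop.
 */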
1886
1887#else
1888
1889/**
1890 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1891 * exception if it fails.
1892 *
1893 * @returns Strict VBox status code.
1894 * @param pVCpu The cross context virtual CPU structure of the
1895 * calling thread.
1896 * @param cbMin The minimum number of bytes relative to offOpcode
1897 * that must be read.
1898 */
1899IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1900{
1901 /*
1902 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1903 *
1904 * First translate CS:rIP to a physical address.
1905 */
1906 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1907 uint32_t cbToTryRead;
1908 RTGCPTR GCPtrNext;
1909 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1910 {
1911 cbToTryRead = PAGE_SIZE;
1912 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1913 if (!IEM_IS_CANONICAL(GCPtrNext))
1914 return iemRaiseGeneralProtectionFault0(pVCpu);
1915 }
1916 else
1917 {
1918 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1919 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1920 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1921 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1922 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1923 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1924 if (!cbToTryRead) /* overflowed */
1925 {
1926 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1927 cbToTryRead = UINT32_MAX;
1928 /** @todo check out wrapping around the code segment. */
1929 }
1930 if (cbToTryRead < cbMin - cbLeft)
1931 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1932 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1933 }
1934
1935 /* Only read up to the end of the page, and make sure we don't read more
1936 than the opcode buffer can hold. */
1937 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1938 if (cbToTryRead > cbLeftOnPage)
1939 cbToTryRead = cbLeftOnPage;
1940 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1941 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1942/** @todo r=bird: Convert assertion into undefined opcode exception? */
1943 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1944
1945# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1946 /* Allow interpretation of patch manager code blocks since they can for
1947 instance throw #PFs for perfectly good reasons. */
1948 if (pVCpu->iem.s.fInPatchCode)
1949 {
1950 size_t cbRead = 0;
1951 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1952 AssertRCReturn(rc, rc);
1953 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1954 return VINF_SUCCESS;
1955 }
1956# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1957
1958 RTGCPHYS GCPhys;
1959 uint64_t fFlags;
1960 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1961 if (RT_FAILURE(rc))
1962 {
1963 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1964 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1965 }
1966 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1967 {
1968 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1969 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1970 }
1971 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1972 {
1973 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1974 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1975 }
1976 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1977 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1978 /** @todo Check reserved bits and such stuff. PGM is better at doing
1979 * that, so do it when implementing the guest virtual address
1980 * TLB... */
1981
1982 /*
1983 * Read the bytes at this address.
1984 *
1985 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1986 * and since PATM should only patch the start of an instruction there
1987 * should be no need to check again here.
1988 */
1989 if (!pVCpu->iem.s.fBypassHandlers)
1990 {
1991 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1992 cbToTryRead, PGMACCESSORIGIN_IEM);
1993 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1994 { /* likely */ }
1995 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1996 {
1997 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1998 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1999 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2000 }
2001 else
2002 {
2003 Log((RT_SUCCESS(rcStrict)
2004 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2005 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2006 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2007 return rcStrict;
2008 }
2009 }
2010 else
2011 {
2012 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2013 if (RT_SUCCESS(rc))
2014 { /* likely */ }
2015 else
2016 {
2017 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2018 return rc;
2019 }
2020 }
2021 pVCpu->iem.s.cbOpcode += cbToTryRead;
2022 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2023
2024 return VINF_SUCCESS;
2025}
2026
2027#endif /* !IEM_WITH_CODE_TLB */
2028#ifndef IEM_WITH_SETJMP
2029
2030/**
2031 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2032 *
2033 * @returns Strict VBox status code.
2034 * @param pVCpu The cross context virtual CPU structure of the
2035 * calling thread.
2036 * @param pb Where to return the opcode byte.
2037 */
2038DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2039{
2040 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2041 if (rcStrict == VINF_SUCCESS)
2042 {
2043 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2044 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2045 pVCpu->iem.s.offOpcode = offOpcode + 1;
2046 }
2047 else
2048 *pb = 0;
2049 return rcStrict;
2050}
2051
2052
2053/**
2054 * Fetches the next opcode byte.
2055 *
2056 * @returns Strict VBox status code.
2057 * @param pVCpu The cross context virtual CPU structure of the
2058 * calling thread.
2059 * @param pu8 Where to return the opcode byte.
2060 */
2061DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2062{
2063 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2064 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2065 {
2066 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2067 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2068 return VINF_SUCCESS;
2069 }
2070 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2071}
2072
2073#else /* IEM_WITH_SETJMP */
2074
2075/**
2076 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2077 *
2078 * @returns The opcode byte.
2079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2080 */
2081DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2082{
2083# ifdef IEM_WITH_CODE_TLB
2084 uint8_t u8;
2085 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2086 return u8;
2087# else
2088 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2089 if (rcStrict == VINF_SUCCESS)
2090 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2091 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2092# endif
2093}
2094
2095
2096/**
2097 * Fetches the next opcode byte, longjmp on error.
2098 *
2099 * @returns The opcode byte.
2100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2101 */
2102DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2103{
2104# ifdef IEM_WITH_CODE_TLB
2105 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2106 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2107 if (RT_LIKELY( pbBuf != NULL
2108 && offBuf < pVCpu->iem.s.cbInstrBuf))
2109 {
2110 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2111 return pbBuf[offBuf];
2112 }
2113# else
2114 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2115 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2116 {
2117 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2118 return pVCpu->iem.s.abOpcode[offOpcode];
2119 }
2120# endif
2121 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2122}
2123
2124#endif /* IEM_WITH_SETJMP */
2125
2126/**
2127 * Fetches the next opcode byte, returns automatically on failure.
2128 *
2129 * @param a_pu8 Where to return the opcode byte.
2130 * @remark Implicitly references pVCpu.
2131 */
2132#ifndef IEM_WITH_SETJMP
2133# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2134 do \
2135 { \
2136 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2137 if (rcStrict2 == VINF_SUCCESS) \
2138 { /* likely */ } \
2139 else \
2140 return rcStrict2; \
2141 } while (0)
2142#else
2143# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2144#endif /* IEM_WITH_SETJMP */
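/*
 * Typical use of IEM_OPCODE_GET_NEXT_U8 in the instruction decoders looks
 * something like this (iemOp_Example is a made-up decoder, shown purely to
 * illustrate the two behaviours of the macro):
 *
 *      IEM_STATIC VBOXSTRICTRC iemOp_Example(PVMCPU pVCpu)
 *      {
 *          uint8_t bRm;
 *          IEM_OPCODE_GET_NEXT_U8(&bRm);
 *          ...decode using bRm...
 *          return VINF_SUCCESS;
 *      }
 *
 * Without IEM_WITH_SETJMP the macro returns the failure status from the
 * caller (which therefore must return VBOXSTRICTRC); with IEM_WITH_SETJMP
 * it longjmps via pJmpBuf instead, so no status plumbing is needed.
 */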
2145
2146
2147#ifndef IEM_WITH_SETJMP
2148/**
2149 * Fetches the next signed byte from the opcode stream.
2150 *
2151 * @returns Strict VBox status code.
2152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2153 * @param pi8 Where to return the signed byte.
2154 */
2155DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2156{
2157 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2158}
2159#endif /* !IEM_WITH_SETJMP */
2160
2161
2162/**
2163 * Fetches the next signed byte from the opcode stream, returning automatically
2164 * on failure.
2165 *
2166 * @param a_pi8 Where to return the signed byte.
2167 * @remark Implicitly references pVCpu.
2168 */
2169#ifndef IEM_WITH_SETJMP
2170# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2171 do \
2172 { \
2173 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2174 if (rcStrict2 != VINF_SUCCESS) \
2175 return rcStrict2; \
2176 } while (0)
2177#else /* IEM_WITH_SETJMP */
2178# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2179
2180#endif /* IEM_WITH_SETJMP */
2181
2182#ifndef IEM_WITH_SETJMP
2183
2184/**
2185 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2186 *
2187 * @returns Strict VBox status code.
2188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2189 * @param pu16 Where to return the opcode word.
2190 */
2191DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2192{
2193 uint8_t u8;
2194 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2195 if (rcStrict == VINF_SUCCESS)
2196 *pu16 = (int8_t)u8;
2197 return rcStrict;
2198}
2199
2200
2201/**
2202 * Fetches the next signed byte from the opcode stream, extending it to
2203 * unsigned 16-bit.
2204 *
2205 * @returns Strict VBox status code.
2206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2207 * @param pu16 Where to return the unsigned word.
2208 */
2209DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2210{
2211 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2212 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2213 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2214
2215 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2216 pVCpu->iem.s.offOpcode = offOpcode + 1;
2217 return VINF_SUCCESS;
2218}
2219
2220#endif /* !IEM_WITH_SETJMP */
2221
2222/**
2223 * Fetches the next signed byte from the opcode stream and sign-extends it to
2224 * a word, returning automatically on failure.
2225 *
2226 * @param a_pu16 Where to return the word.
2227 * @remark Implicitly references pVCpu.
2228 */
2229#ifndef IEM_WITH_SETJMP
2230# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2231 do \
2232 { \
2233 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2234 if (rcStrict2 != VINF_SUCCESS) \
2235 return rcStrict2; \
2236 } while (0)
2237#else
2238# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2239#endif
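/*
 * A small worked example of what the S8_SX_* fetchers above and below do:
 * the opcode byte 0xFE is first interpreted as the signed value -2 by the
 * (int8_t) cast, and assigning that to the wider unsigned destination then
 * yields 0xFFFE, 0xFFFFFFFE or 0xFFFFFFFFFFFFFFFE for the U16, U32 and U64
 * variants respectively.  That is what makes rel8 style displacements
 * combine correctly with 16, 32 and 64-bit operands.
 */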
2240
2241#ifndef IEM_WITH_SETJMP
2242
2243/**
2244 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2245 *
2246 * @returns Strict VBox status code.
2247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2248 * @param pu32 Where to return the opcode dword.
2249 */
2250DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2251{
2252 uint8_t u8;
2253 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2254 if (rcStrict == VINF_SUCCESS)
2255 *pu32 = (int8_t)u8;
2256 return rcStrict;
2257}
2258
2259
2260/**
2261 * Fetches the next signed byte from the opcode stream, extending it to
2262 * unsigned 32-bit.
2263 *
2264 * @returns Strict VBox status code.
2265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2266 * @param pu32 Where to return the unsigned dword.
2267 */
2268DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2269{
2270 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2271 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2272 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2273
2274 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2275 pVCpu->iem.s.offOpcode = offOpcode + 1;
2276 return VINF_SUCCESS;
2277}
2278
2279#endif /* !IEM_WITH_SETJMP */
2280
2281/**
2282 * Fetches the next signed byte from the opcode stream and sign-extends it to
2283 * a double word, returning automatically on failure.
2284 *
2285 * @param a_pu32 Where to return the double word.
2286 * @remark Implicitly references pVCpu.
2287 */
2288#ifndef IEM_WITH_SETJMP
2289# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2290 do \
2291 { \
2292 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2293 if (rcStrict2 != VINF_SUCCESS) \
2294 return rcStrict2; \
2295 } while (0)
2296#else
2297# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2298#endif
2299
2300#ifndef IEM_WITH_SETJMP
2301
2302/**
2303 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2304 *
2305 * @returns Strict VBox status code.
2306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2307 * @param pu64 Where to return the opcode qword.
2308 */
2309DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2310{
2311 uint8_t u8;
2312 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2313 if (rcStrict == VINF_SUCCESS)
2314 *pu64 = (int8_t)u8;
2315 return rcStrict;
2316}
2317
2318
2319/**
2320 * Fetches the next signed byte from the opcode stream, extending it to
2321 * unsigned 64-bit.
2322 *
2323 * @returns Strict VBox status code.
2324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2325 * @param pu64 Where to return the unsigned qword.
2326 */
2327DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2328{
2329 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2330 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2331 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2332
2333 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2334 pVCpu->iem.s.offOpcode = offOpcode + 1;
2335 return VINF_SUCCESS;
2336}
2337
2338#endif /* !IEM_WITH_SETJMP */
2339
2340
2341/**
2342 * Fetches the next signed byte from the opcode stream and sign-extends it to
2343 * a quad word, returning automatically on failure.
2344 *
2345 * @param a_pu64 Where to return the quad word.
2346 * @remark Implicitly references pVCpu.
2347 */
2348#ifndef IEM_WITH_SETJMP
2349# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2350 do \
2351 { \
2352 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2353 if (rcStrict2 != VINF_SUCCESS) \
2354 return rcStrict2; \
2355 } while (0)
2356#else
2357# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2358#endif
2359
2360
2361#ifndef IEM_WITH_SETJMP
2362
2363/**
2364 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2365 *
2366 * @returns Strict VBox status code.
2367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2368 * @param pu16 Where to return the opcode word.
2369 */
2370DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2371{
2372 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2373 if (rcStrict == VINF_SUCCESS)
2374 {
2375 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2376# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2377 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2378# else
2379 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2380# endif
2381 pVCpu->iem.s.offOpcode = offOpcode + 2;
2382 }
2383 else
2384 *pu16 = 0;
2385 return rcStrict;
2386}
2387
2388
2389/**
2390 * Fetches the next opcode word.
2391 *
2392 * @returns Strict VBox status code.
2393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2394 * @param pu16 Where to return the opcode word.
2395 */
2396DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2397{
2398 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2399 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2400 {
2401 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2402# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2403 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2404# else
2405 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2406# endif
2407 return VINF_SUCCESS;
2408 }
2409 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2410}
2411
2412#else /* IEM_WITH_SETJMP */
2413
2414/**
2415 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2416 *
2417 * @returns The opcode word.
2418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2419 */
2420DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2421{
2422# ifdef IEM_WITH_CODE_TLB
2423 uint16_t u16;
2424 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2425 return u16;
2426# else
2427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2428 if (rcStrict == VINF_SUCCESS)
2429 {
2430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2431 pVCpu->iem.s.offOpcode += 2;
2432# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2433 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2434# else
2435 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2436# endif
2437 }
2438 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2439# endif
2440}
2441
2442
2443/**
2444 * Fetches the next opcode word, longjmp on error.
2445 *
2446 * @returns The opcode word.
2447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2448 */
2449DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2450{
2451# ifdef IEM_WITH_CODE_TLB
2452 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2453 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2454 if (RT_LIKELY( pbBuf != NULL
2455 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2456 {
2457 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2458# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2459 return *(uint16_t const *)&pbBuf[offBuf];
2460# else
2461 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2462# endif
2463 }
2464# else
2465 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2466 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2467 {
2468 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2469# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2470 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2471# else
2472 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2473# endif
2474 }
2475# endif
2476 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2477}
2478
2479#endif /* IEM_WITH_SETJMP */
2480
2481
2482/**
2483 * Fetches the next opcode word, returns automatically on failure.
2484 *
2485 * @param a_pu16 Where to return the opcode word.
2486 * @remark Implicitly references pVCpu.
2487 */
2488#ifndef IEM_WITH_SETJMP
2489# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2490 do \
2491 { \
2492 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2493 if (rcStrict2 != VINF_SUCCESS) \
2494 return rcStrict2; \
2495 } while (0)
2496#else
2497# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2498#endif
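/*
 * Byte order note (assuming RT_MAKE_U16 takes the low byte first, which is
 * what the IEM_USE_UNALIGNED_DATA_ACCESS variant above implies): with the
 * opcode bytes 0x34 0x12 in the stream, the word fetchers above return
 * 0x1234, i.e. immediates and displacements are assembled little endian,
 * exactly as the guest CPU would read them.  The wider U32/U64 fetchers
 * below follow the same pattern via RT_MAKE_U32_FROM_U8 and
 * RT_MAKE_U64_FROM_U8.
 */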
2499
2500#ifndef IEM_WITH_SETJMP
2501
2502/**
2503 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2504 *
2505 * @returns Strict VBox status code.
2506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2507 * @param pu32 Where to return the opcode double word.
2508 */
2509DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2510{
2511 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2512 if (rcStrict == VINF_SUCCESS)
2513 {
2514 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2515 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2516 pVCpu->iem.s.offOpcode = offOpcode + 2;
2517 }
2518 else
2519 *pu32 = 0;
2520 return rcStrict;
2521}
2522
2523
2524/**
2525 * Fetches the next opcode word, zero extending it to a double word.
2526 *
2527 * @returns Strict VBox status code.
2528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2529 * @param pu32 Where to return the opcode double word.
2530 */
2531DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2532{
2533 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2534 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2535 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2536
2537 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2538 pVCpu->iem.s.offOpcode = offOpcode + 2;
2539 return VINF_SUCCESS;
2540}
2541
2542#endif /* !IEM_WITH_SETJMP */
2543
2544
2545/**
2546 * Fetches the next opcode word and zero extends it to a double word, returns
2547 * automatically on failure.
2548 *
2549 * @param a_pu32 Where to return the opcode double word.
2550 * @remark Implicitly references pVCpu.
2551 */
2552#ifndef IEM_WITH_SETJMP
2553# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2554 do \
2555 { \
2556 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2557 if (rcStrict2 != VINF_SUCCESS) \
2558 return rcStrict2; \
2559 } while (0)
2560#else
2561# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2562#endif
2563
2564#ifndef IEM_WITH_SETJMP
2565
2566/**
2567 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2568 *
2569 * @returns Strict VBox status code.
2570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2571 * @param pu64 Where to return the opcode quad word.
2572 */
2573DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2574{
2575 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2576 if (rcStrict == VINF_SUCCESS)
2577 {
2578 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2579 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2580 pVCpu->iem.s.offOpcode = offOpcode + 2;
2581 }
2582 else
2583 *pu64 = 0;
2584 return rcStrict;
2585}
2586
2587
2588/**
2589 * Fetches the next opcode word, zero extending it to a quad word.
2590 *
2591 * @returns Strict VBox status code.
2592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2593 * @param pu64 Where to return the opcode quad word.
2594 */
2595DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2596{
2597 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2598 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2599 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2600
2601 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2602 pVCpu->iem.s.offOpcode = offOpcode + 2;
2603 return VINF_SUCCESS;
2604}
2605
2606#endif /* !IEM_WITH_SETJMP */
2607
2608/**
2609 * Fetches the next opcode word and zero extends it to a quad word, returns
2610 * automatically on failure.
2611 *
2612 * @param a_pu64 Where to return the opcode quad word.
2613 * @remark Implicitly references pVCpu.
2614 */
2615#ifndef IEM_WITH_SETJMP
2616# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2617 do \
2618 { \
2619 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2620 if (rcStrict2 != VINF_SUCCESS) \
2621 return rcStrict2; \
2622 } while (0)
2623#else
2624# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2625#endif
2626
2627
2628#ifndef IEM_WITH_SETJMP
2629/**
2630 * Fetches the next signed word from the opcode stream.
2631 *
2632 * @returns Strict VBox status code.
2633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2634 * @param pi16 Where to return the signed word.
2635 */
2636DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2637{
2638 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2639}
2640#endif /* !IEM_WITH_SETJMP */
2641
2642
2643/**
2644 * Fetches the next signed word from the opcode stream, returning automatically
2645 * on failure.
2646 *
2647 * @param a_pi16 Where to return the signed word.
2648 * @remark Implicitly references pVCpu.
2649 */
2650#ifndef IEM_WITH_SETJMP
2651# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2652 do \
2653 { \
2654 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2655 if (rcStrict2 != VINF_SUCCESS) \
2656 return rcStrict2; \
2657 } while (0)
2658#else
2659# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2660#endif
2661
2662#ifndef IEM_WITH_SETJMP
2663
2664/**
2665 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2666 *
2667 * @returns Strict VBox status code.
2668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2669 * @param pu32 Where to return the opcode dword.
2670 */
2671DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2672{
2673 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2674 if (rcStrict == VINF_SUCCESS)
2675 {
2676 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2677# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2678 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2679# else
2680 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2681 pVCpu->iem.s.abOpcode[offOpcode + 1],
2682 pVCpu->iem.s.abOpcode[offOpcode + 2],
2683 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2684# endif
2685 pVCpu->iem.s.offOpcode = offOpcode + 4;
2686 }
2687 else
2688 *pu32 = 0;
2689 return rcStrict;
2690}
2691
2692
2693/**
2694 * Fetches the next opcode dword.
2695 *
2696 * @returns Strict VBox status code.
2697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2698 * @param pu32 Where to return the opcode double word.
2699 */
2700DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2701{
2702 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2703 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2704 {
2705 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2706# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2707 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2708# else
2709 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2710 pVCpu->iem.s.abOpcode[offOpcode + 1],
2711 pVCpu->iem.s.abOpcode[offOpcode + 2],
2712 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2713# endif
2714 return VINF_SUCCESS;
2715 }
2716 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2717}
2718
2719#else /* IEM_WITH_SETJMP */
2720
2721/**
2722 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2723 *
2724 * @returns The opcode dword.
2725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2726 */
2727DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2728{
2729# ifdef IEM_WITH_CODE_TLB
2730 uint32_t u32;
2731 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2732 return u32;
2733# else
2734 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2735 if (rcStrict == VINF_SUCCESS)
2736 {
2737 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2738 pVCpu->iem.s.offOpcode = offOpcode + 4;
2739# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2740 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2741# else
2742 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2743 pVCpu->iem.s.abOpcode[offOpcode + 1],
2744 pVCpu->iem.s.abOpcode[offOpcode + 2],
2745 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2746# endif
2747 }
2748 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2749# endif
2750}
2751
2752
2753/**
2754 * Fetches the next opcode dword, longjmp on error.
2755 *
2756 * @returns The opcode dword.
2757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2758 */
2759DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2760{
2761# ifdef IEM_WITH_CODE_TLB
2762 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2763 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2764 if (RT_LIKELY( pbBuf != NULL
2765 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2766 {
2767 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2768# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2769 return *(uint32_t const *)&pbBuf[offBuf];
2770# else
2771 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2772 pbBuf[offBuf + 1],
2773 pbBuf[offBuf + 2],
2774 pbBuf[offBuf + 3]);
2775# endif
2776 }
2777# else
2778 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2779 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2780 {
2781 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2782# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2783 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2784# else
2785 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2786 pVCpu->iem.s.abOpcode[offOpcode + 1],
2787 pVCpu->iem.s.abOpcode[offOpcode + 2],
2788 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2789# endif
2790 }
2791# endif
2792 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2793}
2794
2795#endif /* IEM_WITH_SETJMP */
2796
2797
2798/**
2799 * Fetches the next opcode dword, returns automatically on failure.
2800 *
2801 * @param a_pu32 Where to return the opcode dword.
2802 * @remark Implicitly references pVCpu.
2803 */
2804#ifndef IEM_WITH_SETJMP
2805# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2806 do \
2807 { \
2808 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2809 if (rcStrict2 != VINF_SUCCESS) \
2810 return rcStrict2; \
2811 } while (0)
2812#else
2813# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2814#endif
2815
2816#ifndef IEM_WITH_SETJMP
2817
2818/**
2819 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2820 *
2821 * @returns Strict VBox status code.
2822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2823 * @param pu64 Where to return the opcode quad word.
2824 */
2825DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2826{
2827 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2828 if (rcStrict == VINF_SUCCESS)
2829 {
2830 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2831 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2832 pVCpu->iem.s.abOpcode[offOpcode + 1],
2833 pVCpu->iem.s.abOpcode[offOpcode + 2],
2834 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2835 pVCpu->iem.s.offOpcode = offOpcode + 4;
2836 }
2837 else
2838 *pu64 = 0;
2839 return rcStrict;
2840}
2841
2842
2843/**
2844 * Fetches the next opcode dword, zero extending it to a quad word.
2845 *
2846 * @returns Strict VBox status code.
2847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2848 * @param pu64 Where to return the opcode quad word.
2849 */
2850DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2851{
2852 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2853 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2854 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2855
2856 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2857 pVCpu->iem.s.abOpcode[offOpcode + 1],
2858 pVCpu->iem.s.abOpcode[offOpcode + 2],
2859 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2860 pVCpu->iem.s.offOpcode = offOpcode + 4;
2861 return VINF_SUCCESS;
2862}
2863
2864#endif /* !IEM_WITH_SETJMP */
2865
2866
2867/**
2868 * Fetches the next opcode dword and zero extends it to a quad word, returns
2869 * automatically on failure.
2870 *
2871 * @param a_pu64 Where to return the opcode quad word.
2872 * @remark Implicitly references pVCpu.
2873 */
2874#ifndef IEM_WITH_SETJMP
2875# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2876 do \
2877 { \
2878 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2879 if (rcStrict2 != VINF_SUCCESS) \
2880 return rcStrict2; \
2881 } while (0)
2882#else
2883# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2884#endif
2885
2886
2887#ifndef IEM_WITH_SETJMP
2888/**
2889 * Fetches the next signed double word from the opcode stream.
2890 *
2891 * @returns Strict VBox status code.
2892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2893 * @param pi32 Where to return the signed double word.
2894 */
2895DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2896{
2897 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2898}
2899#endif
2900
2901/**
2902 * Fetches the next signed double word from the opcode stream, returning
2903 * automatically on failure.
2904 *
2905 * @param a_pi32 Where to return the signed double word.
2906 * @remark Implicitly references pVCpu.
2907 */
2908#ifndef IEM_WITH_SETJMP
2909# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2910 do \
2911 { \
2912 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2913 if (rcStrict2 != VINF_SUCCESS) \
2914 return rcStrict2; \
2915 } while (0)
2916#else
2917# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2918#endif
2919
2920#ifndef IEM_WITH_SETJMP
2921
2922/**
2923 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2924 *
2925 * @returns Strict VBox status code.
2926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2927 * @param pu64 Where to return the opcode qword.
2928 */
2929DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2930{
2931 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2932 if (rcStrict == VINF_SUCCESS)
2933 {
2934 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2935 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2936 pVCpu->iem.s.abOpcode[offOpcode + 1],
2937 pVCpu->iem.s.abOpcode[offOpcode + 2],
2938 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2939 pVCpu->iem.s.offOpcode = offOpcode + 4;
2940 }
2941 else
2942 *pu64 = 0;
2943 return rcStrict;
2944}
2945
2946
2947/**
2948 * Fetches the next opcode dword, sign extending it into a quad word.
2949 *
2950 * @returns Strict VBox status code.
2951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2952 * @param pu64 Where to return the opcode quad word.
2953 */
2954DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2955{
2956 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2957 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2958 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2959
2960 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2961 pVCpu->iem.s.abOpcode[offOpcode + 1],
2962 pVCpu->iem.s.abOpcode[offOpcode + 2],
2963 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2964 *pu64 = i32;
2965 pVCpu->iem.s.offOpcode = offOpcode + 4;
2966 return VINF_SUCCESS;
2967}
2968
2969#endif /* !IEM_WITH_SETJMP */
2970
2971
2972/**
2973 * Fetches the next opcode double word and sign extends it to a quad word,
2974 * returns automatically on failure.
2975 *
2976 * @param a_pu64 Where to return the opcode quad word.
2977 * @remark Implicitly references pVCpu.
2978 */
2979#ifndef IEM_WITH_SETJMP
2980# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2981 do \
2982 { \
2983 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2984 if (rcStrict2 != VINF_SUCCESS) \
2985 return rcStrict2; \
2986 } while (0)
2987#else
2988# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2989#endif
2990
2991#ifndef IEM_WITH_SETJMP
2992
2993/**
2994 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2995 *
2996 * @returns Strict VBox status code.
2997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2998 * @param pu64 Where to return the opcode qword.
2999 */
3000DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3001{
3002 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3003 if (rcStrict == VINF_SUCCESS)
3004 {
3005 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3006# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3007 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3008# else
3009 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3010 pVCpu->iem.s.abOpcode[offOpcode + 1],
3011 pVCpu->iem.s.abOpcode[offOpcode + 2],
3012 pVCpu->iem.s.abOpcode[offOpcode + 3],
3013 pVCpu->iem.s.abOpcode[offOpcode + 4],
3014 pVCpu->iem.s.abOpcode[offOpcode + 5],
3015 pVCpu->iem.s.abOpcode[offOpcode + 6],
3016 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3017# endif
3018 pVCpu->iem.s.offOpcode = offOpcode + 8;
3019 }
3020 else
3021 *pu64 = 0;
3022 return rcStrict;
3023}
3024
3025
3026/**
3027 * Fetches the next opcode qword.
3028 *
3029 * @returns Strict VBox status code.
3030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3031 * @param pu64 Where to return the opcode qword.
3032 */
3033DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3034{
3035 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3036 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3037 {
3038# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3039 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3040# else
3041 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3042 pVCpu->iem.s.abOpcode[offOpcode + 1],
3043 pVCpu->iem.s.abOpcode[offOpcode + 2],
3044 pVCpu->iem.s.abOpcode[offOpcode + 3],
3045 pVCpu->iem.s.abOpcode[offOpcode + 4],
3046 pVCpu->iem.s.abOpcode[offOpcode + 5],
3047 pVCpu->iem.s.abOpcode[offOpcode + 6],
3048 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3049# endif
3050 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3051 return VINF_SUCCESS;
3052 }
3053 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3054}
3055
3056#else /* IEM_WITH_SETJMP */
3057
3058/**
3059 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3060 *
3061 * @returns The opcode qword.
3062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3063 */
3064DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3065{
3066# ifdef IEM_WITH_CODE_TLB
3067 uint64_t u64;
3068 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3069 return u64;
3070# else
3071 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3072 if (rcStrict == VINF_SUCCESS)
3073 {
3074 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3075 pVCpu->iem.s.offOpcode = offOpcode + 8;
3076# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3077 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3078# else
3079 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3080 pVCpu->iem.s.abOpcode[offOpcode + 1],
3081 pVCpu->iem.s.abOpcode[offOpcode + 2],
3082 pVCpu->iem.s.abOpcode[offOpcode + 3],
3083 pVCpu->iem.s.abOpcode[offOpcode + 4],
3084 pVCpu->iem.s.abOpcode[offOpcode + 5],
3085 pVCpu->iem.s.abOpcode[offOpcode + 6],
3086 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3087# endif
3088 }
3089 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3090# endif
3091}
3092
3093
3094/**
3095 * Fetches the next opcode qword, longjmp on error.
3096 *
3097 * @returns The opcode qword.
3098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3099 */
3100DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3101{
3102# ifdef IEM_WITH_CODE_TLB
3103 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3104 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3105 if (RT_LIKELY( pbBuf != NULL
3106 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3107 {
3108 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3109# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3110 return *(uint64_t const *)&pbBuf[offBuf];
3111# else
3112 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3113 pbBuf[offBuf + 1],
3114 pbBuf[offBuf + 2],
3115 pbBuf[offBuf + 3],
3116 pbBuf[offBuf + 4],
3117 pbBuf[offBuf + 5],
3118 pbBuf[offBuf + 6],
3119 pbBuf[offBuf + 7]);
3120# endif
3121 }
3122# else
3123 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3124 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3125 {
3126 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3127# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3128 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3129# else
3130 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3131 pVCpu->iem.s.abOpcode[offOpcode + 1],
3132 pVCpu->iem.s.abOpcode[offOpcode + 2],
3133 pVCpu->iem.s.abOpcode[offOpcode + 3],
3134 pVCpu->iem.s.abOpcode[offOpcode + 4],
3135 pVCpu->iem.s.abOpcode[offOpcode + 5],
3136 pVCpu->iem.s.abOpcode[offOpcode + 6],
3137 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3138# endif
3139 }
3140# endif
3141 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3142}
3143
3144#endif /* IEM_WITH_SETJMP */
3145
3146/**
3147 * Fetches the next opcode quad word, returns automatically on failure.
3148 *
3149 * @param a_pu64 Where to return the opcode quad word.
3150 * @remark Implicitly references pVCpu.
3151 */
3152#ifndef IEM_WITH_SETJMP
3153# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3154 do \
3155 { \
3156 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3157 if (rcStrict2 != VINF_SUCCESS) \
3158 return rcStrict2; \
3159 } while (0)
3160#else
3161# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3162#endif
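/* Usage sketch (hypothetical caller): an instruction decoder would fetch a 64-bit immediate with
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 * In the non-setjmp build the macro makes the enclosing function return the strict status code on
 * failure; in the setjmp build iemOpcodeGetNextU64Jmp longjmps instead. */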
3163
3164
3165/** @name Misc Worker Functions.
3166 * @{
3167 */
3168
3169/**
3170 * Gets the exception class for the specified exception vector.
3171 *
3172 * @returns The class of the specified exception.
3173 * @param uVector The exception vector.
3174 */
3175IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3176{
3177 Assert(uVector <= X86_XCPT_LAST);
3178 switch (uVector)
3179 {
3180 case X86_XCPT_DE:
3181 case X86_XCPT_TS:
3182 case X86_XCPT_NP:
3183 case X86_XCPT_SS:
3184 case X86_XCPT_GP:
3185 case X86_XCPT_SX: /* AMD only */
3186 return IEMXCPTCLASS_CONTRIBUTORY;
3187
3188 case X86_XCPT_PF:
3189 case X86_XCPT_VE: /* Intel only */
3190 return IEMXCPTCLASS_PAGE_FAULT;
3191
3192 case X86_XCPT_DF:
3193 return IEMXCPTCLASS_DOUBLE_FAULT;
3194 }
3195 return IEMXCPTCLASS_BENIGN;
3196}
3197
3198
3199/**
3200 * Evaluates how to handle an exception caused during delivery of another event
3201 * (exception / interrupt).
3202 *
3203 * @returns How to handle the recursive exception.
3204 * @param pVCpu The cross context virtual CPU structure of the
3205 * calling thread.
3206 * @param fPrevFlags The flags of the previous event.
3207 * @param uPrevVector The vector of the previous event.
3208 * @param fCurFlags The flags of the current exception.
3209 * @param uCurVector The vector of the current exception.
3210 * @param pfXcptRaiseInfo Where to store additional information about the
3211 * exception condition. Optional.
3212 */
3213VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3214 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3215{
3216 /*
3217      * Only CPU exceptions can be raised while delivering other events; software interrupt
3218 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3219 */
3220 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3221 Assert(pVCpu); RT_NOREF(pVCpu);
3222 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3223
3224 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3225 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3226 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3227 {
3228 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3229 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3230 {
3231 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3232 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3233 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3234 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3235 {
3236 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3237 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3238 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3239 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3240 uCurVector, pVCpu->cpum.GstCtx.cr2));
3241 }
3242 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3243 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3244 {
3245 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3246 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3247 }
3248 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3249 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3250 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3251 {
3252 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3253 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3254 }
3255 }
3256 else
3257 {
3258 if (uPrevVector == X86_XCPT_NMI)
3259 {
3260 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3261 if (uCurVector == X86_XCPT_PF)
3262 {
3263 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3264 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3265 }
3266 }
3267 else if ( uPrevVector == X86_XCPT_AC
3268 && uCurVector == X86_XCPT_AC)
3269 {
3270 enmRaise = IEMXCPTRAISE_CPU_HANG;
3271 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3272 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3273 }
3274 }
3275 }
3276 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3277 {
3278 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3279 if (uCurVector == X86_XCPT_PF)
3280 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3281 }
3282 else
3283 {
3284 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3285 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3286 }
3287
3288 if (pfXcptRaiseInfo)
3289 *pfXcptRaiseInfo = fRaiseInfo;
3290 return enmRaise;
3291}
3292
3293
3294/**
3295 * Enters the CPU shutdown state initiated by a triple fault or other
3296 * unrecoverable conditions.
3297 *
3298 * @returns Strict VBox status code.
3299 * @param pVCpu The cross context virtual CPU structure of the
3300 * calling thread.
3301 */
3302IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3303{
3304 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3305 {
3306 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3307 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3308 }
3309
3310 RT_NOREF(pVCpu);
3311 return VINF_EM_TRIPLE_FAULT;
3312}
3313
3314
3315/**
3316 * Validates a new SS segment.
3317 *
3318 * @returns VBox strict status code.
3319 * @param pVCpu The cross context virtual CPU structure of the
3320 * calling thread.
3321  * @param   NewSS           The new SS selector.
3322 * @param uCpl The CPL to load the stack for.
3323 * @param pDesc Where to return the descriptor.
3324 */
3325IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3326{
3327 /* Null selectors are not allowed (we're not called for dispatching
3328 interrupts with SS=0 in long mode). */
3329 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3330 {
3331 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3332 return iemRaiseTaskSwitchFault0(pVCpu);
3333 }
3334
3335 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3336 if ((NewSS & X86_SEL_RPL) != uCpl)
3337 {
3338 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3339 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3340 }
3341
3342 /*
3343 * Read the descriptor.
3344 */
3345 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3346 if (rcStrict != VINF_SUCCESS)
3347 return rcStrict;
3348
3349 /*
3350 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3351 */
3352 if (!pDesc->Legacy.Gen.u1DescType)
3353 {
3354 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3355 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3356 }
3357
3358 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3359 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3360 {
3361 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3362 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3363 }
3364 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3365 {
3366 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3367 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3368 }
3369
3370 /* Is it there? */
3371 /** @todo testcase: Is this checked before the canonical / limit check below? */
3372 if (!pDesc->Legacy.Gen.u1Present)
3373 {
3374 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3375 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3376 }
3377
3378 return VINF_SUCCESS;
3379}
3380
3381
3382/**
3383 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3384 * not.
3385 *
3386 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3387 */
3388#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3389# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3390#else
3391# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3392#endif
3393
3394/**
3395 * Updates the EFLAGS in the correct manner wrt. PATM.
3396 *
3397 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3398 * @param a_fEfl The new EFLAGS.
3399 */
3400#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3401# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3402#else
3403# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3404#endif
3405
3406
3407/** @} */
3408
3409/** @name Raising Exceptions.
3410 *
3411 * @{
3412 */
3413
3414
3415/**
3416 * Loads the specified stack far pointer from the TSS.
3417 *
3418 * @returns VBox strict status code.
3419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3420 * @param uCpl The CPL to load the stack for.
3421 * @param pSelSS Where to return the new stack segment.
3422 * @param puEsp Where to return the new stack pointer.
3423 */
3424IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3425{
3426 VBOXSTRICTRC rcStrict;
3427 Assert(uCpl < 4);
3428
3429 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3430 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3431 {
3432 /*
3433 * 16-bit TSS (X86TSS16).
3434 */
3435 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3436 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3437 {
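            /* A 16-bit TSS keeps the ring stacks as 2-byte SP + 2-byte SS pairs starting at
               offset 2, hence the uCpl * 4 + 2 offset calculation below. */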
3438 uint32_t off = uCpl * 4 + 2;
3439 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3440 {
3441 /** @todo check actual access pattern here. */
3442 uint32_t u32Tmp = 0; /* gcc maybe... */
3443 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3444 if (rcStrict == VINF_SUCCESS)
3445 {
3446 *puEsp = RT_LOWORD(u32Tmp);
3447 *pSelSS = RT_HIWORD(u32Tmp);
3448 return VINF_SUCCESS;
3449 }
3450 }
3451 else
3452 {
3453 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3454 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3455 }
3456 break;
3457 }
3458
3459 /*
3460 * 32-bit TSS (X86TSS32).
3461 */
3462 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3463 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3464 {
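            /* A 32-bit TSS keeps the ring stacks as 4-byte ESP + 4-byte SS (zero extended) pairs
               starting at offset 4, hence the uCpl * 8 + 4 offset calculation below. */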
3465 uint32_t off = uCpl * 8 + 4;
3466 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3467 {
3468            /** @todo check actual access pattern here. */
3469 uint64_t u64Tmp;
3470 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3471 if (rcStrict == VINF_SUCCESS)
3472 {
3473 *puEsp = u64Tmp & UINT32_MAX;
3474 *pSelSS = (RTSEL)(u64Tmp >> 32);
3475 return VINF_SUCCESS;
3476 }
3477 }
3478 else
3479 {
3480                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3481 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3482 }
3483 break;
3484 }
3485
3486 default:
3487 AssertFailed();
3488 rcStrict = VERR_IEM_IPE_4;
3489 break;
3490 }
3491
3492 *puEsp = 0; /* make gcc happy */
3493 *pSelSS = 0; /* make gcc happy */
3494 return rcStrict;
3495}
3496
3497
3498/**
3499 * Loads the specified stack pointer from the 64-bit TSS.
3500 *
3501 * @returns VBox strict status code.
3502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3503 * @param uCpl The CPL to load the stack for.
3504 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3505 * @param puRsp Where to return the new stack pointer.
3506 */
3507IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3508{
3509 Assert(uCpl < 4);
3510 Assert(uIst < 8);
3511 *puRsp = 0; /* make gcc happy */
3512
3513 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3514 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3515
3516 uint32_t off;
3517 if (uIst)
3518 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3519 else
3520 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3521 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3522 {
3523 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3524 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3525 }
3526
3527 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3528}
3529
3530
3531/**
3532  * Adjusts the CPU state according to the exception being raised.
3533 *
3534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3535 * @param u8Vector The exception that has been raised.
3536 */
3537DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3538{
3539 switch (u8Vector)
3540 {
3541 case X86_XCPT_DB:
3542 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3543 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3544 break;
3545 /** @todo Read the AMD and Intel exception reference... */
3546 }
3547}
3548
3549
3550/**
3551 * Implements exceptions and interrupts for real mode.
3552 *
3553 * @returns VBox strict status code.
3554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3555 * @param cbInstr The number of bytes to offset rIP by in the return
3556 * address.
3557 * @param u8Vector The interrupt / exception vector number.
3558 * @param fFlags The flags.
3559 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3560 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3561 */
3562IEM_STATIC VBOXSTRICTRC
3563iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3564 uint8_t cbInstr,
3565 uint8_t u8Vector,
3566 uint32_t fFlags,
3567 uint16_t uErr,
3568 uint64_t uCr2)
3569{
3570 NOREF(uErr); NOREF(uCr2);
3571 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3572
3573 /*
3574 * Read the IDT entry.
3575 */
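    /* Each real-mode IVT entry is a 4-byte IP:CS far pointer, so vector N occupies bytes 4*N .. 4*N + 3. */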
3576 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3577 {
3578 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3579 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3580 }
3581 RTFAR16 Idte;
3582 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3583 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3584 {
3585 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3586 return rcStrict;
3587 }
3588
3589 /*
3590 * Push the stack frame.
3591 */
3592 uint16_t *pu16Frame;
3593 uint64_t uNewRsp;
3594 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3595 if (rcStrict != VINF_SUCCESS)
3596 return rcStrict;
3597
3598 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3599#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3600 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
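    /* CPUs up to and including the 80186 class push FLAGS with the undefined top bits (15:12)
       reading as set, which is what the 0xf000 OR below emulates for those target CPUs. */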
3601 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3602 fEfl |= UINT16_C(0xf000);
3603#endif
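    /* Real-mode exception frame, lowest address first: IP, CS, FLAGS - the order IRET pops them. */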
3604 pu16Frame[2] = (uint16_t)fEfl;
3605 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3606 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3607 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3608 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3609 return rcStrict;
3610
3611 /*
3612 * Load the vector address into cs:ip and make exception specific state
3613 * adjustments.
3614 */
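    /* Real-mode segmentation: the CS base is simply the IDT entry's segment value shifted left by four. */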
3615 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3616 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3617 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3618 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3619 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3620 pVCpu->cpum.GstCtx.rip = Idte.off;
3621 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3622 IEMMISC_SET_EFL(pVCpu, fEfl);
3623
3624 /** @todo do we actually do this in real mode? */
3625 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3626 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3627
3628 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3629}
3630
3631
3632/**
3633  * Loads a NULL data selector into a segment register when coming from V8086 mode.
3634 *
3635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3636 * @param pSReg Pointer to the segment register.
3637 */
3638IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3639{
3640 pSReg->Sel = 0;
3641 pSReg->ValidSel = 0;
3642 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3643 {
3644 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3645 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3646 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3647 }
3648 else
3649 {
3650 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3651 /** @todo check this on AMD-V */
3652 pSReg->u64Base = 0;
3653 pSReg->u32Limit = 0;
3654 }
3655}
3656
3657
3658/**
3659 * Loads a segment selector during a task switch in V8086 mode.
3660 *
3661 * @param pSReg Pointer to the segment register.
3662 * @param uSel The selector value to load.
3663 */
3664IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3665{
3666 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3667 pSReg->Sel = uSel;
3668 pSReg->ValidSel = uSel;
3669 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3670 pSReg->u64Base = uSel << 4;
3671 pSReg->u32Limit = 0xffff;
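    /* 0xf3 = present, DPL=3, accessed read/write data segment - the fixed attributes of a V86-mode segment. */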
3672 pSReg->Attr.u = 0xf3;
3673}
3674
3675
3676/**
3677 * Loads a NULL data selector into a selector register, both the hidden and
3678 * visible parts, in protected mode.
3679 *
3680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3681 * @param pSReg Pointer to the segment register.
3682 * @param uRpl The RPL.
3683 */
3684IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3685{
3686    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3687 * data selector in protected mode. */
3688 pSReg->Sel = uRpl;
3689 pSReg->ValidSel = uRpl;
3690 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3691 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3692 {
3693 /* VT-x (Intel 3960x) observed doing something like this. */
3694 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3695 pSReg->u32Limit = UINT32_MAX;
3696 pSReg->u64Base = 0;
3697 }
3698 else
3699 {
3700 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3701 pSReg->u32Limit = 0;
3702 pSReg->u64Base = 0;
3703 }
3704}
3705
3706
3707/**
3708 * Loads a segment selector during a task switch in protected mode.
3709 *
3710 * In this task switch scenario, we would throw \#TS exceptions rather than
3711 * \#GPs.
3712 *
3713 * @returns VBox strict status code.
3714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3715 * @param pSReg Pointer to the segment register.
3716 * @param uSel The new selector value.
3717 *
3718 * @remarks This does _not_ handle CS or SS.
3719 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3720 */
3721IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3722{
3723 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3724
3725 /* Null data selector. */
3726 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3727 {
3728 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3729 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3730 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3731 return VINF_SUCCESS;
3732 }
3733
3734 /* Fetch the descriptor. */
3735 IEMSELDESC Desc;
3736 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3737 if (rcStrict != VINF_SUCCESS)
3738 {
3739 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3740 VBOXSTRICTRC_VAL(rcStrict)));
3741 return rcStrict;
3742 }
3743
3744 /* Must be a data segment or readable code segment. */
3745 if ( !Desc.Legacy.Gen.u1DescType
3746 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3747 {
3748 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3749 Desc.Legacy.Gen.u4Type));
3750 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3751 }
3752
3753 /* Check privileges for data segments and non-conforming code segments. */
3754 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3755 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3756 {
3757 /* The RPL and the new CPL must be less than or equal to the DPL. */
3758 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3759 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3760 {
3761 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3762 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3763 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3764 }
3765 }
3766
3767 /* Is it there? */
3768 if (!Desc.Legacy.Gen.u1Present)
3769 {
3770 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3771 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3772 }
3773
3774 /* The base and limit. */
3775 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3776 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3777
3778 /*
3779 * Ok, everything checked out fine. Now set the accessed bit before
3780 * committing the result into the registers.
3781 */
3782 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3783 {
3784 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3788 }
3789
3790 /* Commit */
3791 pSReg->Sel = uSel;
3792 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3793 pSReg->u32Limit = cbLimit;
3794 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3795 pSReg->ValidSel = uSel;
3796 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3797 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3798 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3799
3800 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3801 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3802 return VINF_SUCCESS;
3803}
3804
3805
3806/**
3807 * Performs a task switch.
3808 *
3809 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3810 * caller is responsible for performing the necessary checks (like DPL, TSS
3811 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3812 * reference for JMP, CALL, IRET.
3813 *
3814  * If the task switch is due to a software interrupt or hardware exception,
3815 * the caller is responsible for validating the TSS selector and descriptor. See
3816 * Intel Instruction reference for INT n.
3817 *
3818 * @returns VBox strict status code.
3819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3820 * @param enmTaskSwitch What caused this task switch.
3821 * @param uNextEip The EIP effective after the task switch.
3822 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3823 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3824 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3825 * @param SelTSS The TSS selector of the new task.
3826 * @param pNewDescTSS Pointer to the new TSS descriptor.
3827 */
3828IEM_STATIC VBOXSTRICTRC
3829iemTaskSwitch(PVMCPU pVCpu,
3830 IEMTASKSWITCH enmTaskSwitch,
3831 uint32_t uNextEip,
3832 uint32_t fFlags,
3833 uint16_t uErr,
3834 uint64_t uCr2,
3835 RTSEL SelTSS,
3836 PIEMSELDESC pNewDescTSS)
3837{
3838 Assert(!IEM_IS_REAL_MODE(pVCpu));
3839 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3840 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3841
3842 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3843 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3844 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3845 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3846 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3847
3848 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3849 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3850
3851 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3852 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3853
3854 /* Update CR2 in case it's a page-fault. */
3855 /** @todo This should probably be done much earlier in IEM/PGM. See
3856 * @bugref{5653#c49}. */
3857 if (fFlags & IEM_XCPT_FLAGS_CR2)
3858 pVCpu->cpum.GstCtx.cr2 = uCr2;
3859
3860 /*
3861 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3862 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3863 */
3864 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3865 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3866 if (uNewTSSLimit < uNewTSSLimitMin)
3867 {
3868 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3869 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3870 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3871 }
3872
3873 /*
3874 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3875 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3876 */
3877 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3878 {
3879 uint32_t const uExitInfo1 = SelTSS;
3880 uint32_t uExitInfo2 = uErr;
3881 switch (enmTaskSwitch)
3882 {
3883 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3884 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3885 default: break;
3886 }
3887 if (fFlags & IEM_XCPT_FLAGS_ERR)
3888 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3889 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3890 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3891
3892 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3893 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3894 RT_NOREF2(uExitInfo1, uExitInfo2);
3895 }
3896 /** @todo Nested-VMX task-switch intercept. */
3897
3898 /*
3899 * Check the current TSS limit. The last written byte to the current TSS during the
3900 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3901 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3902 *
3903  * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3904 * end up with smaller than "legal" TSS limits.
3905 */
3906 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
3907 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3908 if (uCurTSSLimit < uCurTSSLimitMin)
3909 {
3910 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3911 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3912 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3913 }
3914
3915 /*
3916 * Verify that the new TSS can be accessed and map it. Map only the required contents
3917 * and not the entire TSS.
3918 */
3919 void *pvNewTSS;
3920 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3921 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3922 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3923    /** @todo Handle the case where the TSS crosses a page boundary. Intel specifies that it may
3924 * not perform correct translation if this happens. See Intel spec. 7.2.1
3925 * "Task-State Segment" */
3926 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3927 if (rcStrict != VINF_SUCCESS)
3928 {
3929 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3930 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3931 return rcStrict;
3932 }
3933
3934 /*
3935 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3936 */
3937 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
3938 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3939 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3940 {
3941 PX86DESC pDescCurTSS;
3942 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3943 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3944 if (rcStrict != VINF_SUCCESS)
3945 {
3946            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3947 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3948 return rcStrict;
3949 }
3950
3951 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3952 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3953 if (rcStrict != VINF_SUCCESS)
3954 {
3955            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3956 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3957 return rcStrict;
3958 }
3959
3960 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3961 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3962 {
3963 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3964 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3965 u32EFlags &= ~X86_EFL_NT;
3966 }
3967 }
3968
3969 /*
3970 * Save the CPU state into the current TSS.
3971 */
3972 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
3973 if (GCPtrNewTSS == GCPtrCurTSS)
3974 {
3975 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3976 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3977 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ldtr.Sel));
3978 }
3979 if (fIsNewTSS386)
3980 {
3981 /*
3982 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3983 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3984 */
3985 void *pvCurTSS32;
3986 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
3987 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
3988 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3989 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3990 if (rcStrict != VINF_SUCCESS)
3991 {
3992 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3993 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3994 return rcStrict;
3995 }
3996
3997        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3998 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3999 pCurTSS32->eip = uNextEip;
4000 pCurTSS32->eflags = u32EFlags;
4001 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4002 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4003 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4004 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4005 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4006 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4007 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4008 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4009 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4010 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4011 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4012 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4013 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4014 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4015
4016 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4017 if (rcStrict != VINF_SUCCESS)
4018 {
4019 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4020 VBOXSTRICTRC_VAL(rcStrict)));
4021 return rcStrict;
4022 }
4023 }
4024 else
4025 {
4026 /*
4027 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4028 */
4029 void *pvCurTSS16;
4030 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4031 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4032 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4033 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4034 if (rcStrict != VINF_SUCCESS)
4035 {
4036 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4037 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4038 return rcStrict;
4039 }
4040
4041        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4042 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4043 pCurTSS16->ip = uNextEip;
4044 pCurTSS16->flags = u32EFlags;
4045 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4046 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4047 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4048 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4049 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4050 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4051 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4052 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4053 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4054 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4055 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4056 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4057
4058 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4059 if (rcStrict != VINF_SUCCESS)
4060 {
4061 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4062 VBOXSTRICTRC_VAL(rcStrict)));
4063 return rcStrict;
4064 }
4065 }
4066
4067 /*
4068 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4069 */
4070 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4071 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4072 {
4073 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4074 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4075 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4076 }
4077
4078 /*
4079 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4080 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4081 */
4082 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4083 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4084 bool fNewDebugTrap;
4085 if (fIsNewTSS386)
4086 {
4087 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4088 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4089 uNewEip = pNewTSS32->eip;
4090 uNewEflags = pNewTSS32->eflags;
4091 uNewEax = pNewTSS32->eax;
4092 uNewEcx = pNewTSS32->ecx;
4093 uNewEdx = pNewTSS32->edx;
4094 uNewEbx = pNewTSS32->ebx;
4095 uNewEsp = pNewTSS32->esp;
4096 uNewEbp = pNewTSS32->ebp;
4097 uNewEsi = pNewTSS32->esi;
4098 uNewEdi = pNewTSS32->edi;
4099 uNewES = pNewTSS32->es;
4100 uNewCS = pNewTSS32->cs;
4101 uNewSS = pNewTSS32->ss;
4102 uNewDS = pNewTSS32->ds;
4103 uNewFS = pNewTSS32->fs;
4104 uNewGS = pNewTSS32->gs;
4105 uNewLdt = pNewTSS32->selLdt;
4106 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4107 }
4108 else
4109 {
4110 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4111 uNewCr3 = 0;
4112 uNewEip = pNewTSS16->ip;
4113 uNewEflags = pNewTSS16->flags;
4114 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4115 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4116 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4117 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4118 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4119 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4120 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4121 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4122 uNewES = pNewTSS16->es;
4123 uNewCS = pNewTSS16->cs;
4124 uNewSS = pNewTSS16->ss;
4125 uNewDS = pNewTSS16->ds;
4126 uNewFS = 0;
4127 uNewGS = 0;
4128 uNewLdt = pNewTSS16->selLdt;
4129 fNewDebugTrap = false;
4130 }
4131
4132 if (GCPtrNewTSS == GCPtrCurTSS)
4133 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4134 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4135
4136 /*
4137 * We're done accessing the new TSS.
4138 */
4139 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4140 if (rcStrict != VINF_SUCCESS)
4141 {
4142 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4143 return rcStrict;
4144 }
4145
4146 /*
4147 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4148 */
4149 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4150 {
4151 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4152 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4153 if (rcStrict != VINF_SUCCESS)
4154 {
4155 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4156 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4157 return rcStrict;
4158 }
4159
4160 /* Check that the descriptor indicates the new TSS is available (not busy). */
4161 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4162 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4163 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4164
4165 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4166 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4167 if (rcStrict != VINF_SUCCESS)
4168 {
4169 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4170 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4171 return rcStrict;
4172 }
4173 }
4174
4175 /*
4176     * From this point on, we're technically in the new task. Exceptions raised from here on are
4177     * considered to occur after the task switch has completed but before the new task executes its first instruction.
4178 */
4179 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4180 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4181 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4182 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4183 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4184 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4185 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4186
4187 /* Set the busy bit in TR. */
4188 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4189 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4190 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4191 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4192 {
4193 uNewEflags |= X86_EFL_NT;
4194 }
4195
4196 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4197 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4198 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4199
4200 pVCpu->cpum.GstCtx.eip = uNewEip;
4201 pVCpu->cpum.GstCtx.eax = uNewEax;
4202 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4203 pVCpu->cpum.GstCtx.edx = uNewEdx;
4204 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4205 pVCpu->cpum.GstCtx.esp = uNewEsp;
4206 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4207 pVCpu->cpum.GstCtx.esi = uNewEsi;
4208 pVCpu->cpum.GstCtx.edi = uNewEdi;
4209
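    /* Sanitize the EFLAGS loaded from the TSS: keep only the architecturally defined bits and force
       the reserved always-one bit (bit 1). */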
4210 uNewEflags &= X86_EFL_LIVE_MASK;
4211 uNewEflags |= X86_EFL_RA1_MASK;
4212 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4213
4214 /*
4215 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4216 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4217 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4218 */
4219 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4220 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4221
4222 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4223 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4224
4225 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4226 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4227
4228 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4229 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4230
4231 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4232 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4233
4234 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4235 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4236 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4237
4238 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4239 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4240 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4241 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4242
4243 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4244 {
4245 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4246 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4247 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4248 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4249 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4250 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4251 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4252 }
4253
4254 /*
4255 * Switch CR3 for the new task.
4256 */
4257 if ( fIsNewTSS386
4258 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4259 {
4260 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4261 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4262 AssertRCSuccessReturn(rc, rc);
4263
4264 /* Inform PGM. */
4265 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4266 AssertRCReturn(rc, rc);
4267 /* ignore informational status codes */
4268
4269 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4270 }
4271
4272 /*
4273 * Switch LDTR for the new task.
4274 */
4275 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4276 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4277 else
4278 {
4279 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4280
4281 IEMSELDESC DescNewLdt;
4282 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4283 if (rcStrict != VINF_SUCCESS)
4284 {
4285 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4286 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4287 return rcStrict;
4288 }
4289 if ( !DescNewLdt.Legacy.Gen.u1Present
4290 || DescNewLdt.Legacy.Gen.u1DescType
4291 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4292 {
4293 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4294 uNewLdt, DescNewLdt.Legacy.u));
4295 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4296 }
4297
4298 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4299 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4300 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4301 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4302 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4303 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4304 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4306 }
4307
4308 IEMSELDESC DescSS;
4309 if (IEM_IS_V86_MODE(pVCpu))
4310 {
4311 pVCpu->iem.s.uCpl = 3;
4312 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4313 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4314 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4315 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4316 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4317 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4318
4319 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4320 DescSS.Legacy.u = 0;
4321 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4322 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4323 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4324 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4325 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4326 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4327 DescSS.Legacy.Gen.u2Dpl = 3;
4328 }
4329 else
4330 {
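        /* The new CPL is taken from the RPL of the CS selector in the incoming TSS; it is checked
           against the SS and CS descriptor DPLs below. */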
4331 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4332
4333 /*
4334 * Load the stack segment for the new task.
4335 */
4336 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4337 {
4338 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4339 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4340 }
4341
4342 /* Fetch the descriptor. */
4343 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4344 if (rcStrict != VINF_SUCCESS)
4345 {
4346 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4347 VBOXSTRICTRC_VAL(rcStrict)));
4348 return rcStrict;
4349 }
4350
4351 /* SS must be a data segment and writable. */
4352 if ( !DescSS.Legacy.Gen.u1DescType
4353 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4354 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4355 {
4356 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4357 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4358 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4359 }
4360
4361 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4362 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4363 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4364 {
4365 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4366 uNewCpl));
4367 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4368 }
4369
4370 /* Is it there? */
4371 if (!DescSS.Legacy.Gen.u1Present)
4372 {
4373 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4374 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4375 }
4376
4377 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4378 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4379
4380 /* Set the accessed bit before committing the result into SS. */
4381 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4382 {
4383 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4384 if (rcStrict != VINF_SUCCESS)
4385 return rcStrict;
4386 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4387 }
4388
4389 /* Commit SS. */
4390 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4391 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4392 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4393 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4394 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4395 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4396 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4397
4398 /* CPL has changed, update IEM before loading rest of segments. */
4399 pVCpu->iem.s.uCpl = uNewCpl;
4400
4401 /*
4402 * Load the data segments for the new task.
4403 */
4404 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4405 if (rcStrict != VINF_SUCCESS)
4406 return rcStrict;
4407 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4408 if (rcStrict != VINF_SUCCESS)
4409 return rcStrict;
4410 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4411 if (rcStrict != VINF_SUCCESS)
4412 return rcStrict;
4413 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4414 if (rcStrict != VINF_SUCCESS)
4415 return rcStrict;
4416
4417 /*
4418 * Load the code segment for the new task.
4419 */
4420 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4421 {
4422 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4423 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4424 }
4425
4426 /* Fetch the descriptor. */
4427 IEMSELDESC DescCS;
4428 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4429 if (rcStrict != VINF_SUCCESS)
4430 {
4431 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4432 return rcStrict;
4433 }
4434
4435 /* CS must be a code segment. */
4436 if ( !DescCS.Legacy.Gen.u1DescType
4437 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4438 {
4439 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4440 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4441 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4442 }
4443
4444 /* For conforming CS, DPL must be less than or equal to the RPL. */
4445 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4446 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4447 {
4448            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4449 DescCS.Legacy.Gen.u2Dpl));
4450 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4451 }
4452
4453 /* For non-conforming CS, DPL must match RPL. */
4454 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4455 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4456 {
4457            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4458 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4459 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4460 }
4461
4462 /* Is it there? */
4463 if (!DescCS.Legacy.Gen.u1Present)
4464 {
4465 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4466 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4467 }
4468
4469 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4470 u64Base = X86DESC_BASE(&DescCS.Legacy);
4471
4472 /* Set the accessed bit before committing the result into CS. */
4473 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4474 {
4475 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4476 if (rcStrict != VINF_SUCCESS)
4477 return rcStrict;
4478 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4479 }
4480
4481 /* Commit CS. */
4482 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4483 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4484 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4485 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4486 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4487 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4488 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4489 }
4490
4491 /** @todo Debug trap. */
4492 if (fIsNewTSS386 && fNewDebugTrap)
4493 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4494
4495 /*
4496 * Construct the error code masks based on what caused this task switch.
4497 * See Intel Instruction reference for INT.
4498 */
4499 uint16_t uExt;
4500 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4501 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4502 {
4503 uExt = 1;
4504 }
4505 else
4506 uExt = 0;
4507
4508 /*
4509 * Push any error code on to the new stack.
4510 */
4511 if (fFlags & IEM_XCPT_FLAGS_ERR)
4512 {
4513 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4514 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4515 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4516
4517 /* Check that there is sufficient space on the stack. */
4518 /** @todo Factor out segment limit checking for normal/expand down segments
4519 * into a separate function. */
4520 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4521 {
4522 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4523 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4524 {
4525 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4526 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4527 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4528 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4529 }
4530 }
4531 else
4532 {
4533 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4534 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4535 {
4536 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4537 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4538 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4539 }
4540 }
4541
4542
4543 if (fIsNewTSS386)
4544 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4545 else
4546 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4547 if (rcStrict != VINF_SUCCESS)
4548 {
4549 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4550 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4551 return rcStrict;
4552 }
4553 }
4554
4555 /* Check the new EIP against the new CS limit. */
4556 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4557 {
4558        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4559 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4560 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4561 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4562 }
4563
4564 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.ss.Sel));
4565 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4566}
4567
4568
4569/**
4570 * Implements exceptions and interrupts for protected mode.
4571 *
4572 * @returns VBox strict status code.
4573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4574 * @param cbInstr The number of bytes to offset rIP by in the return
4575 * address.
4576 * @param u8Vector The interrupt / exception vector number.
4577 * @param fFlags The flags.
4578 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4579 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4580 */
4581IEM_STATIC VBOXSTRICTRC
4582iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4583 uint8_t cbInstr,
4584 uint8_t u8Vector,
4585 uint32_t fFlags,
4586 uint16_t uErr,
4587 uint64_t uCr2)
4588{
4589 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4590
4591 /*
4592 * Read the IDT entry.
4593 */
4594 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4595 {
4596 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4597 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4598 }
4599 X86DESC Idte;
4600 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4601 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4602 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4603 {
4604 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4605 return rcStrict;
4606 }
4607 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4608 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4609 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4610
4611 /*
4612 * Check the descriptor type, DPL and such.
4613 * ASSUMES this is done in the same order as described for call-gate calls.
4614 */
4615 if (Idte.Gate.u1DescType)
4616 {
4617 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4618 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4619 }
4620 bool fTaskGate = false;
4621 uint8_t f32BitGate = true;
4622 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4623 switch (Idte.Gate.u4Type)
4624 {
4625 case X86_SEL_TYPE_SYS_UNDEFINED:
4626 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4627 case X86_SEL_TYPE_SYS_LDT:
4628 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4629 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4630 case X86_SEL_TYPE_SYS_UNDEFINED2:
4631 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4632 case X86_SEL_TYPE_SYS_UNDEFINED3:
4633 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4634 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4635 case X86_SEL_TYPE_SYS_UNDEFINED4:
4636 {
4637 /** @todo check what actually happens when the type is wrong...
4638 * esp. call gates. */
4639 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4640 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4641 }
4642
4643 case X86_SEL_TYPE_SYS_286_INT_GATE:
4644 f32BitGate = false;
4645 RT_FALL_THRU();
4646 case X86_SEL_TYPE_SYS_386_INT_GATE:
4647 fEflToClear |= X86_EFL_IF;
4648 break;
4649
4650 case X86_SEL_TYPE_SYS_TASK_GATE:
4651 fTaskGate = true;
4652#ifndef IEM_IMPLEMENTS_TASKSWITCH
4653 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4654#endif
4655 break;
4656
4657 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4658 f32BitGate = false;
4659 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4660 break;
4661
4662 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4663 }
4664
4665 /* Check DPL against CPL if applicable. */
4666 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4667 {
4668 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4669 {
4670 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4671 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4672 }
4673 }
4674
4675 /* Is it there? */
4676 if (!Idte.Gate.u1Present)
4677 {
4678 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4679 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4680 }
4681
4682 /* Is it a task-gate? */
4683 if (fTaskGate)
4684 {
4685 /*
4686 * Construct the error code masks based on what caused this task switch.
4687 * See Intel Instruction reference for INT.
4688 */
4689 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4690 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4691 RTSEL SelTSS = Idte.Gate.u16Sel;
4692
4693 /*
4694 * Fetch the TSS descriptor in the GDT.
4695 */
4696 IEMSELDESC DescTSS;
4697 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4698 if (rcStrict != VINF_SUCCESS)
4699 {
4700 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4701 VBOXSTRICTRC_VAL(rcStrict)));
4702 return rcStrict;
4703 }
4704
4705 /* The TSS descriptor must be a system segment and be available (not busy). */
4706 if ( DescTSS.Legacy.Gen.u1DescType
4707 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4708 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4709 {
4710 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4711 u8Vector, SelTSS, DescTSS.Legacy.au64));
4712 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4713 }
4714
4715 /* The TSS must be present. */
4716 if (!DescTSS.Legacy.Gen.u1Present)
4717 {
4718 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4719 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4720 }
4721
4722 /* Do the actual task switch. */
4723 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT, pVCpu->cpum.GstCtx.eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4724 }
4725
4726 /* A null CS is bad. */
4727 RTSEL NewCS = Idte.Gate.u16Sel;
4728 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4729 {
4730 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4731 return iemRaiseGeneralProtectionFault0(pVCpu);
4732 }
4733
4734 /* Fetch the descriptor for the new CS. */
4735 IEMSELDESC DescCS;
4736 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4737 if (rcStrict != VINF_SUCCESS)
4738 {
4739 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4740 return rcStrict;
4741 }
4742
4743 /* Must be a code segment. */
4744 if (!DescCS.Legacy.Gen.u1DescType)
4745 {
4746 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4747 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4748 }
4749 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4750 {
4751 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4752 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4753 }
4754
4755 /* Don't allow lowering the privilege level. */
4756 /** @todo Does the lowering of privileges apply to software interrupts
4757 * only? This has bearings on the more-privileged or
4758 * same-privilege stack behavior further down. A testcase would
4759 * be nice. */
4760 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4761 {
4762 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4763 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4764 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4765 }
4766
4767 /* Make sure the selector is present. */
4768 if (!DescCS.Legacy.Gen.u1Present)
4769 {
4770 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4771 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4772 }
4773
4774 /* Check the new EIP against the new CS limit. */
4775 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4776 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4777 ? Idte.Gate.u16OffsetLow
4778 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4779 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4780 if (uNewEip > cbLimitCS)
4781 {
4782 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4783 u8Vector, uNewEip, cbLimitCS, NewCS));
4784 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4785 }
4786 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4787
4788 /* Calc the flag image to push. */
4789 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4790 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4791 fEfl &= ~X86_EFL_RF;
4792 else
4793 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4794
4795 /* From V8086 mode only go to CPL 0. */
4796 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4797 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4798 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4799 {
4800 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4801 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4802 }
4803
4804 /*
4805 * If the privilege level changes, we need to get a new stack from the TSS.
4806      * This in turn means validating the new SS and ESP...
4807 */
4808 if (uNewCpl != pVCpu->iem.s.uCpl)
4809 {
4810 RTSEL NewSS;
4811 uint32_t uNewEsp;
4812 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4813 if (rcStrict != VINF_SUCCESS)
4814 return rcStrict;
4815
4816 IEMSELDESC DescSS;
4817 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4818 if (rcStrict != VINF_SUCCESS)
4819 return rcStrict;
4820 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4821 if (!DescSS.Legacy.Gen.u1DefBig)
4822 {
4823 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4824 uNewEsp = (uint16_t)uNewEsp;
4825 }
4826
4827 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4828
4829 /* Check that there is sufficient space for the stack frame. */
4830 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4831 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4832 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4833 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4834
4835 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4836 {
4837 if ( uNewEsp - 1 > cbLimitSS
4838 || uNewEsp < cbStackFrame)
4839 {
4840 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4841 u8Vector, NewSS, uNewEsp, cbStackFrame));
4842 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4843 }
4844 }
4845 else
4846 {
4847 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4848 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4849 {
4850 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4851 u8Vector, NewSS, uNewEsp, cbStackFrame));
4852 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4853 }
4854 }
4855
4856 /*
4857 * Start making changes.
4858 */
4859
4860 /* Set the new CPL so that stack accesses use it. */
4861 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4862 pVCpu->iem.s.uCpl = uNewCpl;
4863
4864 /* Create the stack frame. */
4865 RTPTRUNION uStackFrame;
4866 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4867 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4868 if (rcStrict != VINF_SUCCESS)
4869 return rcStrict;
4870 void * const pvStackFrame = uStackFrame.pv;
4871 if (f32BitGate)
4872 {
4873 if (fFlags & IEM_XCPT_FLAGS_ERR)
4874 *uStackFrame.pu32++ = uErr;
4875 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4876 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4877 uStackFrame.pu32[2] = fEfl;
4878 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4879 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4880 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4881 if (fEfl & X86_EFL_VM)
4882 {
4883 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4884 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4885 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4886 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4887 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4888 }
4889 }
4890 else
4891 {
4892 if (fFlags & IEM_XCPT_FLAGS_ERR)
4893 *uStackFrame.pu16++ = uErr;
4894 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4895 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4896 uStackFrame.pu16[2] = fEfl;
4897 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4898 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4899 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4900 if (fEfl & X86_EFL_VM)
4901 {
4902 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
4903 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
4904 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
4905 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
4906 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
4907 }
4908 }
4909 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4910 if (rcStrict != VINF_SUCCESS)
4911 return rcStrict;
4912
4913 /* Mark the selectors 'accessed' (hope this is the correct time). */
4914         /** @todo testcase: exactly _when_ are the accessed bits set - before or
4915 * after pushing the stack frame? (Write protect the gdt + stack to
4916 * find out.) */
4917 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4918 {
4919 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4920 if (rcStrict != VINF_SUCCESS)
4921 return rcStrict;
4922 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4923 }
4924
4925 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4926 {
4927 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4928 if (rcStrict != VINF_SUCCESS)
4929 return rcStrict;
4930 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4931 }
4932
4933 /*
4934      * Start committing the register changes (joins with the DPL=CPL branch).
4935 */
4936 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
4937 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
4938 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4939 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
4940 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4941 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4942 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4943 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4944 * SP is loaded).
4945 * Need to check the other combinations too:
4946 * - 16-bit TSS, 32-bit handler
4947 * - 32-bit TSS, 16-bit handler */
4948 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
4949 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
4950 else
4951 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
4952
4953 if (fEfl & X86_EFL_VM)
4954 {
4955 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
4956 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
4957 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
4958 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
4959 }
4960 }
4961 /*
4962 * Same privilege, no stack change and smaller stack frame.
4963 */
4964 else
4965 {
4966 uint64_t uNewRsp;
4967 RTPTRUNION uStackFrame;
4968 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4969 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4970 if (rcStrict != VINF_SUCCESS)
4971 return rcStrict;
4972 void * const pvStackFrame = uStackFrame.pv;
4973
4974 if (f32BitGate)
4975 {
4976 if (fFlags & IEM_XCPT_FLAGS_ERR)
4977 *uStackFrame.pu32++ = uErr;
4978 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4979 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4980 uStackFrame.pu32[2] = fEfl;
4981 }
4982 else
4983 {
4984 if (fFlags & IEM_XCPT_FLAGS_ERR)
4985 *uStackFrame.pu16++ = uErr;
4986 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4987 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4988 uStackFrame.pu16[2] = fEfl;
4989 }
4990 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4991 if (rcStrict != VINF_SUCCESS)
4992 return rcStrict;
4993
4994 /* Mark the CS selector as 'accessed'. */
4995 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4996 {
4997 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4998 if (rcStrict != VINF_SUCCESS)
4999 return rcStrict;
5000 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5001 }
5002
5003 /*
5004 * Start committing the register changes (joins with the other branch).
5005 */
5006 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5007 }
5008
5009 /* ... register committing continues. */
5010 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5011 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5012 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5013 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5014 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5015 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5016
5017 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5018 fEfl &= ~fEflToClear;
5019 IEMMISC_SET_EFL(pVCpu, fEfl);
5020
5021 if (fFlags & IEM_XCPT_FLAGS_CR2)
5022 pVCpu->cpum.GstCtx.cr2 = uCr2;
5023
5024 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5025 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5026
5027 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5028}
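
/*
 * Illustrative sketch, not part of this file: the stack-frame sizes the
 * protected-mode path above maps and fills when the privilege level changes.
 * The constants mirror the cbStackFrame expression a few screens up; the
 * helper and parameter names are invented for this example only.
 */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static uint8_t xcptProtModeFrameSize(bool fErrorCode, bool f32BitGate, bool fFromV86)
{
    /* Entries pushed on a CPL change: [err] EIP CS EFLAGS ESP SS, plus ES DS FS GS when
       interrupting V8086 code.  The byte counts below assume 2-byte (16-bit gate) entries. */
    uint8_t cbFrame = !fFromV86
                    ? (fErrorCode ? 12 : 10)
                    : (fErrorCode ? 20 : 18);
    return cbFrame << (f32BitGate ? 1 : 0); /* 32-bit gates push 4-byte entries instead. */
}

int main(void)
{
    assert(xcptProtModeFrameSize(false, true,  false) == 20); /* EIP, CS, EFLAGS, ESP, SS as dwords. */
    assert(xcptProtModeFrameSize(true,  true,  true)  == 40); /* V8086 + error code: 10 dwords.      */
    assert(xcptProtModeFrameSize(true,  false, false) == 12); /* 16-bit gate with an error code.     */
    return 0;
}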
5029
5030
5031/**
5032 * Implements exceptions and interrupts for long mode.
5033 *
5034 * @returns VBox strict status code.
5035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5036 * @param cbInstr The number of bytes to offset rIP by in the return
5037 * address.
5038 * @param u8Vector The interrupt / exception vector number.
5039 * @param fFlags The flags.
5040 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5041 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5042 */
5043IEM_STATIC VBOXSTRICTRC
5044iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5045 uint8_t cbInstr,
5046 uint8_t u8Vector,
5047 uint32_t fFlags,
5048 uint16_t uErr,
5049 uint64_t uCr2)
5050{
5051 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5052
5053 /*
5054 * Read the IDT entry.
5055 */
5056 uint16_t offIdt = (uint16_t)u8Vector << 4;
5057 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5058 {
5059 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5060 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5061 }
5062 X86DESC64 Idte;
5063 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5064 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5065 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5066 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5067 {
5068 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5069 return rcStrict;
5070 }
5071 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5072 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5073 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5074
5075 /*
5076 * Check the descriptor type, DPL and such.
5077 * ASSUMES this is done in the same order as described for call-gate calls.
5078 */
5079 if (Idte.Gate.u1DescType)
5080 {
5081 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5082 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5083 }
5084 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5085 switch (Idte.Gate.u4Type)
5086 {
5087 case AMD64_SEL_TYPE_SYS_INT_GATE:
5088 fEflToClear |= X86_EFL_IF;
5089 break;
5090 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5091 break;
5092
5093 default:
5094 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5095 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5096 }
5097
5098 /* Check DPL against CPL if applicable. */
5099 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5100 {
5101 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5102 {
5103 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5104 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5105 }
5106 }
5107
5108 /* Is it there? */
5109 if (!Idte.Gate.u1Present)
5110 {
5111 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5112 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5113 }
5114
5115 /* A null CS is bad. */
5116 RTSEL NewCS = Idte.Gate.u16Sel;
5117 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5118 {
5119 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5120 return iemRaiseGeneralProtectionFault0(pVCpu);
5121 }
5122
5123 /* Fetch the descriptor for the new CS. */
5124 IEMSELDESC DescCS;
5125 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5126 if (rcStrict != VINF_SUCCESS)
5127 {
5128 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5129 return rcStrict;
5130 }
5131
5132 /* Must be a 64-bit code segment. */
5133 if (!DescCS.Long.Gen.u1DescType)
5134 {
5135 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5136 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5137 }
5138 if ( !DescCS.Long.Gen.u1Long
5139 || DescCS.Long.Gen.u1DefBig
5140 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5141 {
5142 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5143 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5144 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5145 }
5146
5147 /* Don't allow lowering the privilege level. For non-conforming CS
5148 selectors, the CS.DPL sets the privilege level the trap/interrupt
5149 handler runs at. For conforming CS selectors, the CPL remains
5150 unchanged, but the CS.DPL must be <= CPL. */
5151 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5152 * when CPU in Ring-0. Result \#GP? */
5153 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5154 {
5155 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5156 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5157 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5158 }
5159
5160
5161 /* Make sure the selector is present. */
5162 if (!DescCS.Legacy.Gen.u1Present)
5163 {
5164 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5165 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5166 }
5167
5168 /* Check that the new RIP is canonical. */
5169 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5170 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5171 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5172 if (!IEM_IS_CANONICAL(uNewRip))
5173 {
5174 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5175 return iemRaiseGeneralProtectionFault0(pVCpu);
5176 }
5177
5178 /*
5179 * If the privilege level changes or if the IST isn't zero, we need to get
5180 * a new stack from the TSS.
5181 */
5182 uint64_t uNewRsp;
5183 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5184 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5185 if ( uNewCpl != pVCpu->iem.s.uCpl
5186 || Idte.Gate.u3IST != 0)
5187 {
5188 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5189 if (rcStrict != VINF_SUCCESS)
5190 return rcStrict;
5191 }
5192 else
5193 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5194 uNewRsp &= ~(uint64_t)0xf;
5195
5196 /*
5197 * Calc the flag image to push.
5198 */
5199 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5200 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5201 fEfl &= ~X86_EFL_RF;
5202 else
5203 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5204
5205 /*
5206 * Start making changes.
5207 */
5208 /* Set the new CPL so that stack accesses use it. */
5209 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5210 pVCpu->iem.s.uCpl = uNewCpl;
5211
5212 /* Create the stack frame. */
5213 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5214 RTPTRUNION uStackFrame;
5215 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5216 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5217 if (rcStrict != VINF_SUCCESS)
5218 return rcStrict;
5219 void * const pvStackFrame = uStackFrame.pv;
5220
5221 if (fFlags & IEM_XCPT_FLAGS_ERR)
5222 *uStackFrame.pu64++ = uErr;
5223 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5224 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5225 uStackFrame.pu64[2] = fEfl;
5226 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5227 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5228 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5229 if (rcStrict != VINF_SUCCESS)
5230 return rcStrict;
5231
5232     /* Mark the CS selector 'accessed' (hope this is the correct time). */
5233     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5234 * after pushing the stack frame? (Write protect the gdt + stack to
5235 * find out.) */
5236 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5237 {
5238 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5239 if (rcStrict != VINF_SUCCESS)
5240 return rcStrict;
5241 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5242 }
5243
5244 /*
5245      * Start committing the register changes.
5246 */
5247     /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5248 * hidden registers when interrupting 32-bit or 16-bit code! */
5249 if (uNewCpl != uOldCpl)
5250 {
5251 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5252 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5253 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5254 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5255 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5256 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5257 }
5258 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5259 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5260 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5261 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5262 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5263 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5264 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5265 pVCpu->cpum.GstCtx.rip = uNewRip;
5266
5267 fEfl &= ~fEflToClear;
5268 IEMMISC_SET_EFL(pVCpu, fEfl);
5269
5270 if (fFlags & IEM_XCPT_FLAGS_CR2)
5271 pVCpu->cpum.GstCtx.cr2 = uCr2;
5272
5273 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5274 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5275
5276 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5277}
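
/*
 * Illustrative sketch, not part of this file: where the long-mode handler
 * above ends up pointing RSP.  Five quadwords (RIP, CS, RFLAGS, RSP, SS) are
 * always pushed, plus an optional error code, after the chosen stack pointer
 * has been aligned down to 16 bytes.  The function name is invented.
 */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static uint64_t xcptLongModeNewRsp(uint64_t uRspFromTssOrCurrent, bool fErrorCode)
{
    uint64_t const uAligned     = uRspFromTssOrCurrent & ~(uint64_t)0xf;          /* 16-byte align. */
    uint32_t const cbStackFrame = sizeof(uint64_t) * (5 + (fErrorCode ? 1 : 0));  /* 40 or 48 bytes. */
    return uAligned - cbStackFrame;                                               /* Stack grows down. */
}

int main(void)
{
    assert(xcptLongModeNewRsp(UINT64_C(0x00007ffffffff000), false) == UINT64_C(0x00007ffffffff000) - 40);
    assert(xcptLongModeNewRsp(UINT64_C(0x00007ffffffff00f), true)  == UINT64_C(0x00007ffffffff000) - 48);
    return 0;
}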
5278
5279
5280/**
5281 * Implements exceptions and interrupts.
5282 *
5283 * All exceptions and interrupts go through this function!
5284 *
5285 * @returns VBox strict status code.
5286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5287 * @param cbInstr The number of bytes to offset rIP by in the return
5288 * address.
5289 * @param u8Vector The interrupt / exception vector number.
5290 * @param fFlags The flags.
5291 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5292 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5293 */
5294DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5295iemRaiseXcptOrInt(PVMCPU pVCpu,
5296 uint8_t cbInstr,
5297 uint8_t u8Vector,
5298 uint32_t fFlags,
5299 uint16_t uErr,
5300 uint64_t uCr2)
5301{
5302 /*
5303 * Get all the state that we might need here.
5304 */
5305 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5306 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5307
5308#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5309 /*
5310 * Flush prefetch buffer
5311 */
5312 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5313#endif
5314
5315 /*
5316 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5317 */
5318 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5319 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5320 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5321 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5322 {
5323 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5324 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5325 u8Vector = X86_XCPT_GP;
5326 uErr = 0;
5327 }
5328#ifdef DBGFTRACE_ENABLED
5329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5330 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5331 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5332#endif
5333
5334#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5335 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5336 {
5337 /*
5338 * If the event is being injected as part of VMRUN, it isn't subject to event
5339 * intercepts in the nested-guest. However, secondary exceptions that occur
5340 * during injection of any event -are- subject to exception intercepts.
5341 * See AMD spec. 15.20 "Event Injection".
5342 */
5343 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5344 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = 1;
5345 else
5346 {
5347 /*
5348 * Check and handle if the event being raised is intercepted.
5349 */
5350 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5351 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5352 return rcStrict0;
5353 }
5354 }
5355#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5356
5357 /*
5358 * Do recursion accounting.
5359 */
5360 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5361 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5362 if (pVCpu->iem.s.cXcptRecursions == 0)
5363 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5364 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5365 else
5366 {
5367 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5368 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5369 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5370
5371 if (pVCpu->iem.s.cXcptRecursions >= 3)
5372 {
5373#ifdef DEBUG_bird
5374 AssertFailed();
5375#endif
5376 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5377 }
5378
5379 /*
5380 * Evaluate the sequence of recurring events.
5381 */
5382 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5383 NULL /* pXcptRaiseInfo */);
5384 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5385 { /* likely */ }
5386 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5387 {
5388 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5389 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5390 u8Vector = X86_XCPT_DF;
5391 uErr = 0;
5392 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5393 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5394 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5395 }
5396 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5397 {
5398 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5399 return iemInitiateCpuShutdown(pVCpu);
5400 }
5401 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5402 {
5403 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5404 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5405 if (!CPUMIsGuestInNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5406 return VERR_EM_GUEST_CPU_HANG;
5407 }
5408 else
5409 {
5410 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5411 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5412 return VERR_IEM_IPE_9;
5413 }
5414
5415 /*
5416      * The 'EXT' bit is set when an exception occurs during delivery of an external
5417      * event (such as an interrupt or earlier exception)[1]. A privileged software
5418      * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5419      * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5420 *
5421 * [1] - Intel spec. 6.13 "Error Code"
5422 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5423 * [3] - Intel Instruction reference for INT n.
5424 */
5425 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5426 && (fFlags & IEM_XCPT_FLAGS_ERR)
5427 && u8Vector != X86_XCPT_PF
5428 && u8Vector != X86_XCPT_DF)
5429 {
5430 uErr |= X86_TRAP_ERR_EXTERNAL;
5431 }
5432 }
5433
5434 pVCpu->iem.s.cXcptRecursions++;
5435 pVCpu->iem.s.uCurXcpt = u8Vector;
5436 pVCpu->iem.s.fCurXcpt = fFlags;
5437 pVCpu->iem.s.uCurXcptErr = uErr;
5438 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5439
5440 /*
5441 * Extensive logging.
5442 */
5443#if defined(LOG_ENABLED) && defined(IN_RING3)
5444 if (LogIs3Enabled())
5445 {
5446 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5447 PVM pVM = pVCpu->CTX_SUFF(pVM);
5448 char szRegs[4096];
5449 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5450 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5451 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5452 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5453 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5454 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5455 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5456 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5457 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5458 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5459 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5460 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5461 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5462 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5463 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5464 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5465 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5466 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5467 " efer=%016VR{efer}\n"
5468 " pat=%016VR{pat}\n"
5469 " sf_mask=%016VR{sf_mask}\n"
5470 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5471 " lstar=%016VR{lstar}\n"
5472 " star=%016VR{star} cstar=%016VR{cstar}\n"
5473 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5474 );
5475
5476 char szInstr[256];
5477 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5478 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5479 szInstr, sizeof(szInstr), NULL);
5480 Log3(("%s%s\n", szRegs, szInstr));
5481 }
5482#endif /* LOG_ENABLED */
5483
5484 /*
5485 * Call the mode specific worker function.
5486 */
5487 VBOXSTRICTRC rcStrict;
5488 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5489 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5490 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5491 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5492 else
5493 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5494
5495 /* Flush the prefetch buffer. */
5496#ifdef IEM_WITH_CODE_TLB
5497 pVCpu->iem.s.pbInstrBuf = NULL;
5498#else
5499 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5500#endif
5501
5502 /*
5503 * Unwind.
5504 */
5505 pVCpu->iem.s.cXcptRecursions--;
5506 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5507 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5508 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5509 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5510 pVCpu->iem.s.cXcptRecursions + 1));
5511 return rcStrict;
5512}
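
/*
 * Illustrative sketch, not part of this file: the order in which the
 * dispatcher above selects a mode-specific worker.  CR0.PE=0 always means the
 * real-mode worker; with protection enabled, EFER.LMA selects the long-mode
 * worker, and everything else (including V8086, since CR0.PE=1 there) goes to
 * the protected-mode worker.  The enum and function names are invented.
 */
#include <stdbool.h>

typedef enum { XCPTWORKER_REAL, XCPTWORKER_LONG, XCPTWORKER_PROT } XCPTWORKER;

static XCPTWORKER xcptPickWorker(bool fCr0Pe, bool fEferLma)
{
    if (!fCr0Pe)
        return XCPTWORKER_REAL;     /* Real mode: IVT-based dispatch. */
    if (fEferLma)
        return XCPTWORKER_LONG;     /* Long mode: 16-byte IDT gates, 64-bit frame. */
    return XCPTWORKER_PROT;         /* Protected mode (and V8086). */
}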
5513
5514#ifdef IEM_WITH_SETJMP
5515/**
5516 * See iemRaiseXcptOrInt. Will not return.
5517 */
5518IEM_STATIC DECL_NO_RETURN(void)
5519iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5520 uint8_t cbInstr,
5521 uint8_t u8Vector,
5522 uint32_t fFlags,
5523 uint16_t uErr,
5524 uint64_t uCr2)
5525{
5526 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5527 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5528}
5529#endif
5530
5531
5532/** \#DE - 00. */
5533DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5534{
5535 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5536}
5537
5538
5539/** \#DB - 01.
5540  * @note This automatically clears DR7.GD. */
5541DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5542{
5543 /** @todo set/clear RF. */
5544 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5545 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5546}
5547
5548
5549/** \#BR - 05. */
5550DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5551{
5552 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5553}
5554
5555
5556/** \#UD - 06. */
5557DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5558{
5559 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5560}
5561
5562
5563/** \#NM - 07. */
5564DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5565{
5566 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5567}
5568
5569
5570/** \#TS(err) - 0a. */
5571DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5572{
5573 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5574}
5575
5576
5577/** \#TS(tr) - 0a. */
5578DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5579{
5580 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5581 pVCpu->cpum.GstCtx.tr.Sel, 0);
5582}
5583
5584
5585/** \#TS(0) - 0a. */
5586DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5587{
5588 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5589 0, 0);
5590}
5591
5592
5593/** \#TS(sel) - 0a. */
5594DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5595{
5596 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5597 uSel & X86_SEL_MASK_OFF_RPL, 0);
5598}
5599
5600
5601/** \#NP(err) - 0b. */
5602DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5603{
5604 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5605}
5606
5607
5608/** \#NP(sel) - 0b. */
5609DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5610{
5611 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5612 uSel & ~X86_SEL_RPL, 0);
5613}
5614
5615
5616/** \#SS(seg) - 0c. */
5617DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5618{
5619 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5620 uSel & ~X86_SEL_RPL, 0);
5621}
5622
5623
5624/** \#SS(err) - 0c. */
5625DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5626{
5627 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5628}
5629
5630
5631/** \#GP(n) - 0d. */
5632DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5633{
5634 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5635}
5636
5637
5638/** \#GP(0) - 0d. */
5639DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5640{
5641 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5642}
5643
5644#ifdef IEM_WITH_SETJMP
5645/** \#GP(0) - 0d. */
5646DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5647{
5648 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5649}
5650#endif
5651
5652
5653/** \#GP(sel) - 0d. */
5654DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5655{
5656 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5657 Sel & ~X86_SEL_RPL, 0);
5658}
5659
5660
5661/** \#GP(0) - 0d. */
5662DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5663{
5664 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5665}
5666
5667
5668/** \#GP(sel) - 0d. */
5669DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5670{
5671 NOREF(iSegReg); NOREF(fAccess);
5672 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5673 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5674}
5675
5676#ifdef IEM_WITH_SETJMP
5677/** \#GP(sel) - 0d, longjmp. */
5678DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5679{
5680 NOREF(iSegReg); NOREF(fAccess);
5681 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5682 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5683}
5684#endif
5685
5686/** \#GP(sel) - 0d. */
5687DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5688{
5689 NOREF(Sel);
5690 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5691}
5692
5693#ifdef IEM_WITH_SETJMP
5694/** \#GP(sel) - 0d, longjmp. */
5695DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5696{
5697 NOREF(Sel);
5698 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5699}
5700#endif
5701
5702
5703/** \#GP(sel) - 0d. */
5704DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5705{
5706 NOREF(iSegReg); NOREF(fAccess);
5707 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5708}
5709
5710#ifdef IEM_WITH_SETJMP
5711/** \#GP(sel) - 0d, longjmp. */
5712DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5713 uint32_t fAccess)
5714{
5715 NOREF(iSegReg); NOREF(fAccess);
5716 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5717}
5718#endif
5719
5720
5721/** \#PF(n) - 0e. */
5722DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5723{
5724 uint16_t uErr;
5725 switch (rc)
5726 {
5727 case VERR_PAGE_NOT_PRESENT:
5728 case VERR_PAGE_TABLE_NOT_PRESENT:
5729 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5730 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5731 uErr = 0;
5732 break;
5733
5734 default:
5735 AssertMsgFailed(("%Rrc\n", rc));
5736 RT_FALL_THRU();
5737 case VERR_ACCESS_DENIED:
5738 uErr = X86_TRAP_PF_P;
5739 break;
5740
5741 /** @todo reserved */
5742 }
5743
5744 if (pVCpu->iem.s.uCpl == 3)
5745 uErr |= X86_TRAP_PF_US;
5746
5747 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5748 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5749 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5750 uErr |= X86_TRAP_PF_ID;
5751
5752#if 0 /* This is so much non-sense, really. Why was it done like that? */
5753 /* Note! RW access callers reporting a WRITE protection fault, will clear
5754 the READ flag before calling. So, read-modify-write accesses (RW)
5755 can safely be reported as READ faults. */
5756 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5757 uErr |= X86_TRAP_PF_RW;
5758#else
5759 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5760 {
5761 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5762 uErr |= X86_TRAP_PF_RW;
5763 }
5764#endif
5765
5766 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5767 uErr, GCPtrWhere);
5768}
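
/*
 * Illustrative sketch, not part of this file: how the \#PF error code above is
 * composed.  The bit values are the architectural ones (P=0x1, RW=0x2, US=0x4,
 * ID=0x10); the helper and parameter names are invented, and fNxActive stands
 * in for the PAE + EFER.NXE check made in the real code.
 */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define PF_ERR_P   UINT16_C(0x0001)  /* The page was present (protection violation). */
#define PF_ERR_RW  UINT16_C(0x0002)  /* The access was a write. */
#define PF_ERR_US  UINT16_C(0x0004)  /* The access originated at CPL 3. */
#define PF_ERR_ID  UINT16_C(0x0010)  /* Instruction fetch (only with no-execute paging). */

static uint16_t pfComposeErrorCode(bool fPresent, bool fWrite, bool fRead, uint8_t uCpl,
                                   bool fInstrFetch, bool fNxActive)
{
    uint16_t uErr = fPresent ? PF_ERR_P : 0;
    if (uCpl == 3)
        uErr |= PF_ERR_US;
    if (fInstrFetch && fNxActive)
        uErr |= PF_ERR_ID;
    if (fWrite && !fRead)   /* Read-modify-write accesses are reported as reads, as above. */
        uErr |= PF_ERR_RW;
    return uErr;
}

int main(void)
{
    /* Ring-3 write to a not-present page: US | RW. */
    assert(pfComposeErrorCode(false, true, false, 3, false, false) == (PF_ERR_US | PF_ERR_RW));
    /* Ring-0 instruction fetch hitting a present but non-executable page: P | ID. */
    assert(pfComposeErrorCode(true, false, true, 0, true, true) == (PF_ERR_P | PF_ERR_ID));
    return 0;
}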
5769
5770#ifdef IEM_WITH_SETJMP
5771/** \#PF(n) - 0e, longjmp. */
5772IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5773{
5774 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5775}
5776#endif
5777
5778
5779/** \#MF(0) - 10. */
5780DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5781{
5782 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5783}
5784
5785
5786/** \#AC(0) - 11. */
5787DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5788{
5789 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5790}
5791
5792
5793/**
5794 * Macro for calling iemCImplRaiseDivideError().
5795 *
5796 * This enables us to add/remove arguments and force different levels of
5797 * inlining as we wish.
5798 *
5799 * @return Strict VBox status code.
5800 */
5801#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5802IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5803{
5804 NOREF(cbInstr);
5805 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5806}
5807
5808
5809/**
5810 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5811 *
5812 * This enables us to add/remove arguments and force different levels of
5813 * inlining as we wish.
5814 *
5815 * @return Strict VBox status code.
5816 */
5817#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5818IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5819{
5820 NOREF(cbInstr);
5821 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5822}
5823
5824
5825/**
5826 * Macro for calling iemCImplRaiseInvalidOpcode().
5827 *
5828 * This enables us to add/remove arguments and force different levels of
5829 * inlining as we wish.
5830 *
5831 * @return Strict VBox status code.
5832 */
5833#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5834IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5835{
5836 NOREF(cbInstr);
5837 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5838}
5839
5840
5841/** @} */
5842
5843
5844/*
5845 *
5846 * Helper routines.
5847 * Helper routines.
5848 * Helper routines.
5849 *
5850 */
5851
5852/**
5853 * Recalculates the effective operand size.
5854 *
5855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5856 */
5857IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5858{
5859 switch (pVCpu->iem.s.enmCpuMode)
5860 {
5861 case IEMMODE_16BIT:
5862 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5863 break;
5864 case IEMMODE_32BIT:
5865 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5866 break;
5867 case IEMMODE_64BIT:
5868 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5869 {
5870 case 0:
5871 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5872 break;
5873 case IEM_OP_PRF_SIZE_OP:
5874 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5875 break;
5876 case IEM_OP_PRF_SIZE_REX_W:
5877 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5878 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5879 break;
5880 }
5881 break;
5882 default:
5883 AssertFailed();
5884 }
5885}
5886
5887
5888/**
5889 * Sets the default operand size to 64-bit and recalculates the effective
5890 * operand size.
5891 *
5892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5893 */
5894IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5895{
5896 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5897 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5898 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5899 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5900 else
5901 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5902}
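
/*
 * Illustrative sketch, not part of this file: effective operand size
 * resolution in 64-bit mode as performed by the two helpers above.  REX.W
 * always forces 64-bit, an operand-size (0x66) prefix without REX.W selects
 * 16-bit, and otherwise the instruction's default size applies (32-bit
 * normally, 64-bit for the instructions that go through the second helper).
 * The enum and function names are invented.
 */
#include <stdbool.h>

typedef enum { OPSIZE_16, OPSIZE_32, OPSIZE_64 } OPSIZE;

static OPSIZE effOpSize64(OPSIZE enmDefault, bool fRexW, bool fOpSizePrefix)
{
    if (fRexW)
        return OPSIZE_64;       /* REX.W wins over 0x66. */
    if (fOpSizePrefix)
        return OPSIZE_16;       /* 0x66 alone drops to 16-bit. */
    return enmDefault;          /* Neither prefix: use the default size. */
}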
5903
5904
5905/*
5906 *
5907 * Common opcode decoders.
5908 * Common opcode decoders.
5909 * Common opcode decoders.
5910 *
5911 */
5912//#include <iprt/mem.h>
5913
5914/**
5915 * Used to add extra details about a stub case.
5916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5917 */
5918IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5919{
5920#if defined(LOG_ENABLED) && defined(IN_RING3)
5921 PVM pVM = pVCpu->CTX_SUFF(pVM);
5922 char szRegs[4096];
5923 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5924 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5925 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5926 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5927 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5928 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5929 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5930 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5931 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5932 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5933 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5934 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5935 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5936 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5937 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5938 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5939 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5940 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5941 " efer=%016VR{efer}\n"
5942 " pat=%016VR{pat}\n"
5943 " sf_mask=%016VR{sf_mask}\n"
5944 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5945 " lstar=%016VR{lstar}\n"
5946 " star=%016VR{star} cstar=%016VR{cstar}\n"
5947 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5948 );
5949
5950 char szInstr[256];
5951 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5952 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5953 szInstr, sizeof(szInstr), NULL);
5954
5955 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5956#else
5957     RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
5958#endif
5959}
5960
5961/**
5962 * Complains about a stub.
5963 *
5964 * Providing two versions of this macro, one for daily use and one for use when
5965 * working on IEM.
5966 */
5967#if 0
5968# define IEMOP_BITCH_ABOUT_STUB() \
5969 do { \
5970 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5971 iemOpStubMsg2(pVCpu); \
5972 RTAssertPanic(); \
5973 } while (0)
5974#else
5975# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5976#endif
5977
5978/** Stubs an opcode. */
5979#define FNIEMOP_STUB(a_Name) \
5980 FNIEMOP_DEF(a_Name) \
5981 { \
5982 RT_NOREF_PV(pVCpu); \
5983 IEMOP_BITCH_ABOUT_STUB(); \
5984 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5985 } \
5986 typedef int ignore_semicolon
5987
5988/** Stubs an opcode. */
5989#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5990 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5991 { \
5992 RT_NOREF_PV(pVCpu); \
5993 RT_NOREF_PV(a_Name0); \
5994 IEMOP_BITCH_ABOUT_STUB(); \
5995 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5996 } \
5997 typedef int ignore_semicolon
5998
5999/** Stubs an opcode which currently should raise \#UD. */
6000#define FNIEMOP_UD_STUB(a_Name) \
6001 FNIEMOP_DEF(a_Name) \
6002 { \
6003 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6004 return IEMOP_RAISE_INVALID_OPCODE(); \
6005 } \
6006 typedef int ignore_semicolon
6007
6008/** Stubs an opcode which currently should raise \#UD. */
6009#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6010 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6011 { \
6012 RT_NOREF_PV(pVCpu); \
6013 RT_NOREF_PV(a_Name0); \
6014 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6015 return IEMOP_RAISE_INVALID_OPCODE(); \
6016 } \
6017 typedef int ignore_semicolon
6018
6019
6020
6021/** @name Register Access.
6022 * @{
6023 */
6024
6025/**
6026 * Gets a reference (pointer) to the specified hidden segment register.
6027 *
6028 * @returns Hidden register reference.
6029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6030 * @param iSegReg The segment register.
6031 */
6032IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6033{
6034 Assert(iSegReg < X86_SREG_COUNT);
6035 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6036 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6037
6038#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6039 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6040 { /* likely */ }
6041 else
6042 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6043#else
6044 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6045#endif
6046 return pSReg;
6047}
6048
6049
6050/**
6051 * Ensures that the given hidden segment register is up to date.
6052 *
6053 * @returns Hidden register reference.
6054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6055 * @param pSReg The segment register.
6056 */
6057IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6058{
6059#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6060 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6061 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6062#else
6063 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6064 NOREF(pVCpu);
6065#endif
6066 return pSReg;
6067}
6068
6069
6070/**
6071 * Gets a reference (pointer) to the specified segment register (the selector
6072 * value).
6073 *
6074 * @returns Pointer to the selector variable.
6075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6076 * @param iSegReg The segment register.
6077 */
6078DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6079{
6080 Assert(iSegReg < X86_SREG_COUNT);
6081 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6082 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6083}
6084
6085
6086/**
6087 * Fetches the selector value of a segment register.
6088 *
6089 * @returns The selector value.
6090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6091 * @param iSegReg The segment register.
6092 */
6093DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6094{
6095 Assert(iSegReg < X86_SREG_COUNT);
6096 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6097 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6098}
6099
6100
6101/**
6102 * Fetches the base address value of a segment register.
6103 *
6104 * @returns The base address value.
6105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6106 * @param iSegReg The segment register.
6107 */
6108DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6109{
6110 Assert(iSegReg < X86_SREG_COUNT);
6111 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6112 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6113}
6114
6115
6116/**
6117 * Gets a reference (pointer) to the specified general purpose register.
6118 *
6119 * @returns Register reference.
6120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6121 * @param iReg The general purpose register.
6122 */
6123DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6124{
6125 Assert(iReg < 16);
6126 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6127}
6128
6129
6130/**
6131 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6132 *
6133 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6134 *
6135 * @returns Register reference.
6136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6137 * @param iReg The register.
6138 */
6139DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6140{
6141 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6142 {
6143 Assert(iReg < 16);
6144 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6145 }
6146 /* high 8-bit register. */
6147 Assert(iReg < 8);
6148 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6149}
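
/*
 * Standalone sketch, not part of the build: the AH/CH/DH/BH aliasing handled
 * above.  Without a REX prefix, register encodings 4..7 address the high byte
 * of registers 0..3 (AH, CH, DH, BH); with any REX prefix they address the
 * low byte of registers 4..7 (SPL, BPL, SIL, DIL).  The DEMOGREG union below
 * is a little-endian stand-in for the real guest register context.
 */
#if 0
typedef union DEMOGREG
{
    uint64_t u64;
    uint8_t  au8[8];    /* au8[0] = low byte (AL), au8[1] = high byte (AH). */
} DEMOGREG;

static uint8_t *demoGRegRefU8(DEMOGREG *paGRegs, uint8_t iReg, bool fHasRexPrefix)
{
    if (iReg < 4 || fHasRexPrefix)
        return &paGRegs[iReg].au8[0];       /* AL, CL, DL, BL, SPL, ..., R15L */
    return &paGRegs[iReg & 3].au8[1];       /* AH, CH, DH, BH */
}
#endif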
6150
6151
6152/**
6153 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6154 *
6155 * @returns Register reference.
6156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6157 * @param iReg The register.
6158 */
6159DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6160{
6161 Assert(iReg < 16);
6162 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6163}
6164
6165
6166/**
6167 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6168 *
6169 * @returns Register reference.
6170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6171 * @param iReg The register.
6172 */
6173DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6174{
6175 Assert(iReg < 16);
6176 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6177}
6178
6179
6180/**
6181 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6182 *
6183 * @returns Register reference.
6184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6185 * @param iReg The register.
6186 */
6187DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6188{
6189 Assert(iReg < 16);
6190 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6191}
6192
6193
6194/**
6195 * Gets a reference (pointer) to the specified segment register's base address.
6196 *
6197 * @returns Segment register base address reference.
6198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6199 * @param iSegReg The segment selector.
6200 */
6201DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6202{
6203 Assert(iSegReg < X86_SREG_COUNT);
6204 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6205 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6206}
6207
6208
6209/**
6210 * Fetches the value of an 8-bit general purpose register.
6211 *
6212 * @returns The register value.
6213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6214 * @param iReg The register.
6215 */
6216DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6217{
6218 return *iemGRegRefU8(pVCpu, iReg);
6219}
6220
6221
6222/**
6223 * Fetches the value of a 16-bit general purpose register.
6224 *
6225 * @returns The register value.
6226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6227 * @param iReg The register.
6228 */
6229DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6230{
6231 Assert(iReg < 16);
6232 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6233}
6234
6235
6236/**
6237 * Fetches the value of a 32-bit general purpose register.
6238 *
6239 * @returns The register value.
6240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6241 * @param iReg The register.
6242 */
6243DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6244{
6245 Assert(iReg < 16);
6246 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6247}
6248
6249
6250/**
6251 * Fetches the value of a 64-bit general purpose register.
6252 *
6253 * @returns The register value.
6254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6255 * @param iReg The register.
6256 */
6257DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6258{
6259 Assert(iReg < 16);
6260 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6261}
6262
6263
6264/**
6265 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6266 *
6267 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6268 * segment limit.
6269 *
6270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6271 * @param offNextInstr The offset of the next instruction.
6272 */
6273IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6274{
6275 switch (pVCpu->iem.s.enmEffOpSize)
6276 {
6277 case IEMMODE_16BIT:
6278 {
6279 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6280 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6281 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6282 return iemRaiseGeneralProtectionFault0(pVCpu);
6283 pVCpu->cpum.GstCtx.rip = uNewIp;
6284 break;
6285 }
6286
6287 case IEMMODE_32BIT:
6288 {
6289 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6290 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6291
6292 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6293 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6294 return iemRaiseGeneralProtectionFault0(pVCpu);
6295 pVCpu->cpum.GstCtx.rip = uNewEip;
6296 break;
6297 }
6298
6299 case IEMMODE_64BIT:
6300 {
6301 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6302
6303 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6304 if (!IEM_IS_CANONICAL(uNewRip))
6305 return iemRaiseGeneralProtectionFault0(pVCpu);
6306 pVCpu->cpum.GstCtx.rip = uNewRip;
6307 break;
6308 }
6309
6310 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6311 }
6312
6313 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6314
6315#ifndef IEM_WITH_CODE_TLB
6316 /* Flush the prefetch buffer. */
6317 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6318#endif
6319
6320 return VINF_SUCCESS;
6321}
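
/*
 * Worked example, not part of the build: the 16-bit case above relies on
 * uint16_t arithmetic wrapping modulo 64KiB before the CS limit check.
 * E.g. IP=0xfff0, a 2 byte instruction and a +0x20 displacement give 0x0012.
 */
#if 0
static uint16_t demoNewIp16(uint16_t uIp, uint8_t cbInstr, int8_t offNextInstr)
{
    return (uint16_t)(uIp + offNextInstr + cbInstr);    /* 0xfff0 + 0x20 + 2 -> 0x0012 */
}
#endif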
6322
6323
6324/**
6325 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6326 *
6327 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6328 * segment limit.
6329 *
6330 * @returns Strict VBox status code.
6331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6332 * @param offNextInstr The offset of the next instruction.
6333 */
6334IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6335{
6336 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6337
6338 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6339 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6340 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6341 return iemRaiseGeneralProtectionFault0(pVCpu);
6342 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6343 pVCpu->cpum.GstCtx.rip = uNewIp;
6344 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6345
6346#ifndef IEM_WITH_CODE_TLB
6347 /* Flush the prefetch buffer. */
6348 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6349#endif
6350
6351 return VINF_SUCCESS;
6352}
6353
6354
6355/**
6356 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6357 *
6358 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6359 * segment limit.
6360 *
6361 * @returns Strict VBox status code.
6362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6363 * @param offNextInstr The offset of the next instruction.
6364 */
6365IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6366{
6367 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6368
6369 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6370 {
6371 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6372
6373 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6374 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6375 return iemRaiseGeneralProtectionFault0(pVCpu);
6376 pVCpu->cpum.GstCtx.rip = uNewEip;
6377 }
6378 else
6379 {
6380 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6381
6382 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6383 if (!IEM_IS_CANONICAL(uNewRip))
6384 return iemRaiseGeneralProtectionFault0(pVCpu);
6385 pVCpu->cpum.GstCtx.rip = uNewRip;
6386 }
6387 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6388
6389#ifndef IEM_WITH_CODE_TLB
6390 /* Flush the prefetch buffer. */
6391 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6392#endif
6393
6394 return VINF_SUCCESS;
6395}
6396
6397
6398/**
6399 * Performs a near jump to the specified address.
6400 *
6401 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6402 * segment limit.
6403 *
6404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6405 * @param uNewRip The new RIP value.
6406 */
6407IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6408{
6409 switch (pVCpu->iem.s.enmEffOpSize)
6410 {
6411 case IEMMODE_16BIT:
6412 {
6413 Assert(uNewRip <= UINT16_MAX);
6414 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6415 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6416 return iemRaiseGeneralProtectionFault0(pVCpu);
6417 /** @todo Test 16-bit jump in 64-bit mode. */
6418 pVCpu->cpum.GstCtx.rip = uNewRip;
6419 break;
6420 }
6421
6422 case IEMMODE_32BIT:
6423 {
6424 Assert(uNewRip <= UINT32_MAX);
6425 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6426 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6427
6428 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6429 return iemRaiseGeneralProtectionFault0(pVCpu);
6430 pVCpu->cpum.GstCtx.rip = uNewRip;
6431 break;
6432 }
6433
6434 case IEMMODE_64BIT:
6435 {
6436 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6437
6438 if (!IEM_IS_CANONICAL(uNewRip))
6439 return iemRaiseGeneralProtectionFault0(pVCpu);
6440 pVCpu->cpum.GstCtx.rip = uNewRip;
6441 break;
6442 }
6443
6444 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6445 }
6446
6447 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6448
6449#ifndef IEM_WITH_CODE_TLB
6450 /* Flush the prefetch buffer. */
6451 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6452#endif
6453
6454 return VINF_SUCCESS;
6455}
6456
6457
6458/**
6459 * Get the address of the top of the stack.
6460 *
6461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6462 */
6463DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6464{
6465 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6466 return pVCpu->cpum.GstCtx.rsp;
6467 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6468 return pVCpu->cpum.GstCtx.esp;
6469 return pVCpu->cpum.GstCtx.sp;
6470}
6471
6472
6473/**
6474 * Updates the RIP/EIP/IP to point to the next instruction.
6475 *
6476 * This function leaves the EFLAGS.RF flag alone.
6477 *
6478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6479 * @param cbInstr The number of bytes to add.
6480 */
6481IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6482{
6483 switch (pVCpu->iem.s.enmCpuMode)
6484 {
6485 case IEMMODE_16BIT:
6486 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6487 pVCpu->cpum.GstCtx.eip += cbInstr;
6488 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6489 break;
6490
6491 case IEMMODE_32BIT:
6492 pVCpu->cpum.GstCtx.eip += cbInstr;
6493 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6494 break;
6495
6496 case IEMMODE_64BIT:
6497 pVCpu->cpum.GstCtx.rip += cbInstr;
6498 break;
6499 default: AssertFailed();
6500 }
6501}
6502
6503
6504#if 0
6505/**
6506 * Updates the RIP/EIP/IP to point to the next instruction.
6507 *
6508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6509 */
6510IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6511{
6512 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6513}
6514#endif
6515
6516
6517
6518/**
6519 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6520 *
6521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6522 * @param cbInstr The number of bytes to add.
6523 */
6524IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6525{
6526 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6527
6528 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6529#if ARCH_BITS >= 64
6530 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6531 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6532 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6533#else
6534 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6535 pVCpu->cpum.GstCtx.rip += cbInstr;
6536 else
6537 pVCpu->cpum.GstCtx.eip += cbInstr;
6538#endif
6539}
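
/*
 * Worked values, not part of the build: the mask table above truncates the
 * result to 32 bits in 16-bit and 32-bit mode (EIP is what really advances),
 * while 64-bit mode keeps the full RIP.
 */
#if 0
static void demoAdvanceRipMasking(void)
{
    Assert(((UINT64_C(0xfffffffd) + 5) & UINT64_C(0xffffffff)) == 2);                     /* 16/32-bit mask */
    Assert(((UINT64_C(0xfffffffd) + 5) & UINT64_MAX)           == UINT64_C(0x100000002)); /* 64-bit mask    */
}
#endif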
6540
6541
6542/**
6543 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6544 *
6545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6546 */
6547IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6548{
6549 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6550}
6551
6552
6553/**
6554 * Adds to the stack pointer.
6555 *
6556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6557 * @param cbToAdd The number of bytes to add (8-bit!).
6558 */
6559DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6560{
6561 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6562 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6563 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6564 pVCpu->cpum.GstCtx.esp += cbToAdd;
6565 else
6566 pVCpu->cpum.GstCtx.sp += cbToAdd;
6567}
6568
6569
6570/**
6571 * Subtracts from the stack pointer.
6572 *
6573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6574 * @param cbToSub The number of bytes to subtract (8-bit!).
6575 */
6576DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6577{
6578 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6579 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6580 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6581 pVCpu->cpum.GstCtx.esp -= cbToSub;
6582 else
6583 pVCpu->cpum.GstCtx.sp -= cbToSub;
6584}
6585
6586
6587/**
6588 * Adds to the temporary stack pointer.
6589 *
6590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6591 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6592 * @param cbToAdd The number of bytes to add (16-bit).
6593 */
6594DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6595{
6596 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6597 pTmpRsp->u += cbToAdd;
6598 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6599 pTmpRsp->DWords.dw0 += cbToAdd;
6600 else
6601 pTmpRsp->Words.w0 += cbToAdd;
6602}
6603
6604
6605/**
6606 * Subtracts from the temporary stack pointer.
6607 *
6608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6609 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6610 * @param cbToSub The number of bytes to subtract.
6611 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6612 * expecting that.
6613 */
6614DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6615{
6616 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6617 pTmpRsp->u -= cbToSub;
6618 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6619 pTmpRsp->DWords.dw0 -= cbToSub;
6620 else
6621 pTmpRsp->Words.w0 -= cbToSub;
6622}
6623
6624
6625/**
6626 * Calculates the effective stack address for a push of the specified size as
6627 * well as the new RSP value (upper bits may be masked).
6628 *
6629 * @returns Effective stack address for the push.
6630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6631 * @param cbItem The size of the stack item to push.
6632 * @param puNewRsp Where to return the new RSP value.
6633 */
6634DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6635{
6636 RTUINT64U uTmpRsp;
6637 RTGCPTR GCPtrTop;
6638 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6639
6640 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6641 GCPtrTop = uTmpRsp.u -= cbItem;
6642 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6643 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6644 else
6645 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6646 *puNewRsp = uTmpRsp.u;
6647 return GCPtrTop;
6648}
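
/*
 * Worked example, not part of the build: with a 16-bit stack (SS.B clear)
 * only SP wraps while the upper RSP bits are preserved in the value written
 * back, e.g. RSP=0x00010002 and a 4 byte push yield an effective address of
 * 0xfffe and a new RSP of 0x0001fffe.
 */
#if 0
static uint64_t demoRspForPush16(uint64_t uRsp, uint8_t cbItem, uint64_t *puNewRsp)
{
    union { uint64_t u; uint32_t dw0; uint16_t w0; } uTmp; /* little-endian overlay, like RTUINT64U */
    uTmp.u = uRsp;
    uint64_t const GCPtrTop = uTmp.w0 -= cbItem;    /* 16-bit wrap; bits 16..63 stay put */
    *puNewRsp = uTmp.u;
    return GCPtrTop;
}
#endif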
6649
6650
6651/**
6652 * Gets the current stack pointer and calculates the value after a pop of the
6653 * specified size.
6654 *
6655 * @returns Current stack pointer.
6656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6657 * @param cbItem The size of the stack item to pop.
6658 * @param puNewRsp Where to return the new RSP value.
6659 */
6660DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6661{
6662 RTUINT64U uTmpRsp;
6663 RTGCPTR GCPtrTop;
6664 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6665
6666 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6667 {
6668 GCPtrTop = uTmpRsp.u;
6669 uTmpRsp.u += cbItem;
6670 }
6671 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6672 {
6673 GCPtrTop = uTmpRsp.DWords.dw0;
6674 uTmpRsp.DWords.dw0 += cbItem;
6675 }
6676 else
6677 {
6678 GCPtrTop = uTmpRsp.Words.w0;
6679 uTmpRsp.Words.w0 += cbItem;
6680 }
6681 *puNewRsp = uTmpRsp.u;
6682 return GCPtrTop;
6683}
6684
6685
6686/**
6687 * Calculates the effective stack address for a push of the specified size as
6688 * well as the new temporary RSP value (upper bits may be masked).
6689 *
6690 * @returns Effective stack address for the push.
6691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6692 * @param pTmpRsp The temporary stack pointer. This is updated.
6693 * @param cbItem The size of the stack item to push.
6694 */
6695DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6696{
6697 RTGCPTR GCPtrTop;
6698
6699 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6700 GCPtrTop = pTmpRsp->u -= cbItem;
6701 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6702 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6703 else
6704 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6705 return GCPtrTop;
6706}
6707
6708
6709/**
6710 * Gets the effective stack address for a pop of the specified size and
6711 * calculates and updates the temporary RSP.
6712 *
6713 * @returns Current stack pointer.
6714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6715 * @param pTmpRsp The temporary stack pointer. This is updated.
6716 * @param cbItem The size of the stack item to pop.
6717 */
6718DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6719{
6720 RTGCPTR GCPtrTop;
6721 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6722 {
6723 GCPtrTop = pTmpRsp->u;
6724 pTmpRsp->u += cbItem;
6725 }
6726 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6727 {
6728 GCPtrTop = pTmpRsp->DWords.dw0;
6729 pTmpRsp->DWords.dw0 += cbItem;
6730 }
6731 else
6732 {
6733 GCPtrTop = pTmpRsp->Words.w0;
6734 pTmpRsp->Words.w0 += cbItem;
6735 }
6736 return GCPtrTop;
6737}
6738
6739/** @} */
6740
6741
6742/** @name FPU access and helpers.
6743 *
6744 * @{
6745 */
6746
6747
6748/**
6749 * Hook for preparing to use the host FPU.
6750 *
6751 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6752 *
6753 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6754 */
6755DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6756{
6757#ifdef IN_RING3
6758 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6759#else
6760 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6761#endif
6762 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6763}
6764
6765
6766/**
6767 * Hook for preparing to use the host FPU for SSE.
6768 *
6769 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6770 *
6771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6772 */
6773DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6774{
6775 iemFpuPrepareUsage(pVCpu);
6776}
6777
6778
6779/**
6780 * Hook for preparing to use the host FPU for AVX.
6781 *
6782 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6783 *
6784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6785 */
6786DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6787{
6788 iemFpuPrepareUsage(pVCpu);
6789}
6790
6791
6792/**
6793 * Hook for actualizing the guest FPU state before the interpreter reads it.
6794 *
6795 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6796 *
6797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6798 */
6799DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6800{
6801#ifdef IN_RING3
6802 NOREF(pVCpu);
6803#else
6804 CPUMRZFpuStateActualizeForRead(pVCpu);
6805#endif
6806 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6807}
6808
6809
6810/**
6811 * Hook for actualizing the guest FPU state before the interpreter changes it.
6812 *
6813 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6814 *
6815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6816 */
6817DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6818{
6819#ifdef IN_RING3
6820 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6821#else
6822 CPUMRZFpuStateActualizeForChange(pVCpu);
6823#endif
6824 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6825}
6826
6827
6828/**
6829 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6830 * only.
6831 *
6832 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6833 *
6834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6835 */
6836DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6837{
6838#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6839 NOREF(pVCpu);
6840#else
6841 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6842#endif
6843 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6844}
6845
6846
6847/**
6848 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6849 * read+write.
6850 *
6851 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6852 *
6853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6854 */
6855DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6856{
6857#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6858 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6859#else
6860 CPUMRZFpuStateActualizeForChange(pVCpu);
6861#endif
6862 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6863}
6864
6865
6866/**
6867 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6868 * only.
6869 *
6870 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6871 *
6872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6873 */
6874DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6875{
6876#ifdef IN_RING3
6877 NOREF(pVCpu);
6878#else
6879 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6880#endif
6881 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6882}
6883
6884
6885/**
6886 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6887 * read+write.
6888 *
6889 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6890 *
6891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6892 */
6893DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6894{
6895#ifdef IN_RING3
6896 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6897#else
6898 CPUMRZFpuStateActualizeForChange(pVCpu);
6899#endif
6900 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6901}
6902
6903
6904/**
6905 * Stores a QNaN value into a FPU register.
6906 *
6907 * @param pReg Pointer to the register.
6908 */
6909DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6910{
6911 pReg->au32[0] = UINT32_C(0x00000000);
6912 pReg->au32[1] = UINT32_C(0xc0000000);
6913 pReg->au16[4] = UINT16_C(0xffff);
6914}
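
/*
 * Worked layout, not part of the build: the three stores above produce the
 * x87 "real indefinite" QNaN, i.e. sign=1, exponent=0x7fff and a mantissa
 * with only the integer (J) bit and the top fraction bit set, giving the
 * 80-bit pattern 0xffff'c0000000'00000000.
 */
#if 0
static void demoQNanPattern(void)
{
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    Assert(r80.au16[4] == UINT16_C(0xffff));        /* sign + all-ones exponent */
    Assert(r80.au32[1] == UINT32_C(0xc0000000));    /* J bit + top fraction bit */
    Assert(r80.au32[0] == UINT32_C(0x00000000));    /* rest of the fraction     */
}
#endif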
6915
6916
6917/**
6918 * Updates the FOP, FPU.CS and FPUIP registers.
6919 *
6920 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6921 * @param pFpuCtx The FPU context.
6922 */
6923DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
6924{
6925 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6926 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6927 /** @todo x87.CS and FPUIP need to be kept separately. */
6928 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6929 {
6930 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
6931 * formed in real mode, based on the fnsave and fnstenv images. */
6932 pFpuCtx->CS = 0;
6933 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
6934 }
6935 else
6936 {
6937 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
6938 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
6939 }
6940}
6941
6942
6943/**
6944 * Updates the x87.DS and FPUDP registers.
6945 *
6946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6947 * @param pFpuCtx The FPU context.
6948 * @param iEffSeg The effective segment register.
6949 * @param GCPtrEff The effective address relative to @a iEffSeg.
6950 */
6951DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6952{
6953 RTSEL sel;
6954 switch (iEffSeg)
6955 {
6956 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
6957 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
6958 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
6959 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
6960 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
6961 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
6962 default:
6963 AssertMsgFailed(("%d\n", iEffSeg));
6964 sel = pVCpu->cpum.GstCtx.ds.Sel;
6965 }
6966 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6967 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6968 {
6969 pFpuCtx->DS = 0;
6970 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6971 }
6972 else
6973 {
6974 pFpuCtx->DS = sel;
6975 pFpuCtx->FPUDP = GCPtrEff;
6976 }
6977}
6978
6979
6980/**
6981 * Rotates the stack registers in the push direction.
6982 *
6983 * @param pFpuCtx The FPU context.
6984 * @remarks This is a complete waste of time, but fxsave stores the registers in
6985 * stack order.
6986 */
6987DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6988{
6989 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6990 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6991 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6992 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6993 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6994 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6995 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6996 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6997 pFpuCtx->aRegs[0].r80 = r80Tmp;
6998}
6999
7000
7001/**
7002 * Rotates the stack registers in the pop direction.
7003 *
7004 * @param pFpuCtx The FPU context.
7005 * @remarks This is a complete waste of time, but fxsave stores the registers in
7006 * stack order.
7007 */
7008DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7009{
7010 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7011 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7012 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7013 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7014 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7015 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7016 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7017 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7018 pFpuCtx->aRegs[7].r80 = r80Tmp;
7019}
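
/*
 * Minimal sketch, not part of the build: aRegs[] is kept in ST-relative order
 * (aRegs[i] is ST(i)), matching the fxsave image, so every TOP change must be
 * mirrored by rotating the array one step.  The loop below is the generic
 * form of the unrolled pop rotation above.
 */
#if 0
static void demoRotateStackPop(RTFLOAT80U *paRegs /* [8] */)
{
    RTFLOAT80U const r80Tmp = paRegs[0];            /* old ST(0) */
    for (unsigned i = 0; i < 7; i++)
        paRegs[i] = paRegs[i + 1];                  /* old ST(i+1) becomes new ST(i) */
    paRegs[7] = r80Tmp;
}
#endif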
7020
7021
7022/**
7023 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7024 * exception prevents it.
7025 *
7026 * @param pResult The FPU operation result to push.
7027 * @param pFpuCtx The FPU context.
7028 */
7029IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7030{
7031 /* Update FSW and bail if there are pending exceptions afterwards. */
7032 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7033 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7034 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7035 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7036 {
7037 pFpuCtx->FSW = fFsw;
7038 return;
7039 }
7040
7041 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7042 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7043 {
7044 /* All is fine, push the actual value. */
7045 pFpuCtx->FTW |= RT_BIT(iNewTop);
7046 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7047 }
7048 else if (pFpuCtx->FCW & X86_FCW_IM)
7049 {
7050 /* Masked stack overflow, push QNaN. */
7051 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7052 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7053 }
7054 else
7055 {
7056 /* Raise stack overflow, don't push anything. */
7057 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7058 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7059 return;
7060 }
7061
7062 fFsw &= ~X86_FSW_TOP_MASK;
7063 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7064 pFpuCtx->FSW = fFsw;
7065
7066 iemFpuRotateStackPush(pFpuCtx);
7067}
7068
7069
7070/**
7071 * Stores a result in a FPU register and updates the FSW and FTW.
7072 *
7073 * @param pFpuCtx The FPU context.
7074 * @param pResult The result to store.
7075 * @param iStReg Which FPU register to store it in.
7076 */
7077IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7078{
7079 Assert(iStReg < 8);
7080 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7081 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7082 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7083 pFpuCtx->FTW |= RT_BIT(iReg);
7084 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7085}
7086
7087
7088/**
7089 * Only updates the FPU status word (FSW) with the result of the current
7090 * instruction.
7091 *
7092 * @param pFpuCtx The FPU context.
7093 * @param u16FSW The FSW output of the current instruction.
7094 */
7095IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7096{
7097 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7098 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7099}
7100
7101
7102/**
7103 * Pops one item off the FPU stack if no pending exception prevents it.
7104 *
7105 * @param pFpuCtx The FPU context.
7106 */
7107IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7108{
7109 /* Check pending exceptions. */
7110 uint16_t uFSW = pFpuCtx->FSW;
7111 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7112 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7113 return;
7114
7115 /* TOP--. */
7116 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7117 uFSW &= ~X86_FSW_TOP_MASK;
7118 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7119 pFpuCtx->FSW = uFSW;
7120
7121 /* Mark the previous ST0 as empty. */
7122 iOldTop >>= X86_FSW_TOP_SHIFT;
7123 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7124
7125 /* Rotate the registers. */
7126 iemFpuRotateStackPop(pFpuCtx);
7127}
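
/*
 * Worked values, not part of the build: TOP lives in a 3-bit field, so the
 * helpers use modulo-8 tricks - adding 9 (8 + 1) increments TOP as done
 * above for a pop, while adding 7 decrements it as done in the push helpers.
 */
#if 0
static void demoFswTopArithmetic(void)
{
    Assert(((0 + 9) & X86_FSW_TOP_SMASK) == 1);     /* pop:  TOP 0 -> 1 */
    Assert(((0 + 7) & X86_FSW_TOP_SMASK) == 7);     /* push: TOP 0 -> 7 (wraps) */
}
#endif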
7128
7129
7130/**
7131 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7132 *
7133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7134 * @param pResult The FPU operation result to push.
7135 */
7136IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7137{
7138 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7139 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7140 iemFpuMaybePushResult(pResult, pFpuCtx);
7141}
7142
7143
7144/**
7145 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7146 * and sets FPUDP and FPUDS.
7147 *
7148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7149 * @param pResult The FPU operation result to push.
7150 * @param iEffSeg The effective segment register.
7151 * @param GCPtrEff The effective address relative to @a iEffSeg.
7152 */
7153IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7154{
7155 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7156 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7157 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7158 iemFpuMaybePushResult(pResult, pFpuCtx);
7159}
7160
7161
7162/**
7163 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7164 * unless a pending exception prevents it.
7165 *
7166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7167 * @param pResult The FPU operation result to store and push.
7168 */
7169IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7170{
7171 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7172 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7173
7174 /* Update FSW and bail if there are pending exceptions afterwards. */
7175 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7176 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7177 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7178 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7179 {
7180 pFpuCtx->FSW = fFsw;
7181 return;
7182 }
7183
7184 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7185 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7186 {
7187 /* All is fine, push the actual value. */
7188 pFpuCtx->FTW |= RT_BIT(iNewTop);
7189 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7190 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7191 }
7192 else if (pFpuCtx->FCW & X86_FCW_IM)
7193 {
7194 /* Masked stack overflow, push QNaN. */
7195 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7196 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7197 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7198 }
7199 else
7200 {
7201 /* Raise stack overflow, don't push anything. */
7202 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7203 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7204 return;
7205 }
7206
7207 fFsw &= ~X86_FSW_TOP_MASK;
7208 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7209 pFpuCtx->FSW = fFsw;
7210
7211 iemFpuRotateStackPush(pFpuCtx);
7212}
7213
7214
7215/**
7216 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7217 * FOP.
7218 *
7219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7220 * @param pResult The result to store.
7221 * @param iStReg Which FPU register to store it in.
7222 */
7223IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7224{
7225 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7226 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7227 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7228}
7229
7230
7231/**
7232 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7233 * FOP, and then pops the stack.
7234 *
7235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7236 * @param pResult The result to store.
7237 * @param iStReg Which FPU register to store it in.
7238 */
7239IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7240{
7241 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7242 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7243 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7244 iemFpuMaybePopOne(pFpuCtx);
7245}
7246
7247
7248/**
7249 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7250 * FPUDP, and FPUDS.
7251 *
7252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7253 * @param pResult The result to store.
7254 * @param iStReg Which FPU register to store it in.
7255 * @param iEffSeg The effective memory operand selector register.
7256 * @param GCPtrEff The effective memory operand offset.
7257 */
7258IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7259 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7260{
7261 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7262 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7263 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7264 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7265}
7266
7267
7268/**
7269 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7270 * FPUDP, and FPUDS, and then pops the stack.
7271 *
7272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7273 * @param pResult The result to store.
7274 * @param iStReg Which FPU register to store it in.
7275 * @param iEffSeg The effective memory operand selector register.
7276 * @param GCPtrEff The effective memory operand offset.
7277 */
7278IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7279 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7280{
7281 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7282 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7283 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7284 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7285 iemFpuMaybePopOne(pFpuCtx);
7286}
7287
7288
7289/**
7290 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7291 *
7292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7293 */
7294IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7295{
7296 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7297 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7298}
7299
7300
7301/**
7302 * Marks the specified stack register as free (for FFREE).
7303 *
7304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7305 * @param iStReg The register to free.
7306 */
7307IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7308{
7309 Assert(iStReg < 8);
7310 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7311 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7312 pFpuCtx->FTW &= ~RT_BIT(iReg);
7313}
7314
7315
7316/**
7317 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7318 *
7319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7320 */
7321IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7322{
7323 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7324 uint16_t uFsw = pFpuCtx->FSW;
7325 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7326 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7327 uFsw &= ~X86_FSW_TOP_MASK;
7328 uFsw |= uTop;
7329 pFpuCtx->FSW = uFsw;
7330}
7331
7332
7333/**
7334 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7335 *
7336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7337 */
7338IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7339{
7340 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7341 uint16_t uFsw = pFpuCtx->FSW;
7342 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7343 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7344 uFsw &= ~X86_FSW_TOP_MASK;
7345 uFsw |= uTop;
7346 pFpuCtx->FSW = uFsw;
7347}
7348
7349
7350/**
7351 * Updates the FSW, FOP, FPUIP, and FPUCS.
7352 *
7353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7354 * @param u16FSW The FSW from the current instruction.
7355 */
7356IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7357{
7358 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7359 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7360 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7361}
7362
7363
7364/**
7365 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7366 *
7367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7368 * @param u16FSW The FSW from the current instruction.
7369 */
7370IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7371{
7372 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7373 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7374 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7375 iemFpuMaybePopOne(pFpuCtx);
7376}
7377
7378
7379/**
7380 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7381 *
7382 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7383 * @param u16FSW The FSW from the current instruction.
7384 * @param iEffSeg The effective memory operand selector register.
7385 * @param GCPtrEff The effective memory operand offset.
7386 */
7387IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7388{
7389 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7390 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7391 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7392 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7393}
7394
7395
7396/**
7397 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7398 *
7399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7400 * @param u16FSW The FSW from the current instruction.
7401 */
7402IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7403{
7404 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7405 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7406 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7407 iemFpuMaybePopOne(pFpuCtx);
7408 iemFpuMaybePopOne(pFpuCtx);
7409}
7410
7411
7412/**
7413 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7414 *
7415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7416 * @param u16FSW The FSW from the current instruction.
7417 * @param iEffSeg The effective memory operand selector register.
7418 * @param GCPtrEff The effective memory operand offset.
7419 */
7420IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7421{
7422 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7423 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7424 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7425 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7426 iemFpuMaybePopOne(pFpuCtx);
7427}
7428
7429
7430/**
7431 * Worker routine for raising an FPU stack underflow exception.
7432 *
7433 * @param pFpuCtx The FPU context.
7434 * @param iStReg The stack register being accessed.
7435 */
7436IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7437{
7438 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7439 if (pFpuCtx->FCW & X86_FCW_IM)
7440 {
7441 /* Masked underflow. */
7442 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7443 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7444 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7445 if (iStReg != UINT8_MAX)
7446 {
7447 pFpuCtx->FTW |= RT_BIT(iReg);
7448 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7449 }
7450 }
7451 else
7452 {
7453 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7454 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7455 }
7456}
7457
7458
7459/**
7460 * Raises a FPU stack underflow exception.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 * @param iStReg The destination register that should be loaded
7464 * with QNaN if \#IS is not masked. Specify
7465 * UINT8_MAX if none (like for fcom).
7466 */
7467DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7468{
7469 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7470 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7471 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7472}
7473
7474
7475DECL_NO_INLINE(IEM_STATIC, void)
7476iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7477{
7478 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7479 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7480 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7481 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7482}
7483
7484
7485DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7486{
7487 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7488 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7489 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7490 iemFpuMaybePopOne(pFpuCtx);
7491}
7492
7493
7494DECL_NO_INLINE(IEM_STATIC, void)
7495iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7496{
7497 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7498 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7499 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7500 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7501 iemFpuMaybePopOne(pFpuCtx);
7502}
7503
7504
7505DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7506{
7507 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7508 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7509 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7510 iemFpuMaybePopOne(pFpuCtx);
7511 iemFpuMaybePopOne(pFpuCtx);
7512}
7513
7514
7515DECL_NO_INLINE(IEM_STATIC, void)
7516iemFpuStackPushUnderflow(PVMCPU pVCpu)
7517{
7518 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7519 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7520
7521 if (pFpuCtx->FCW & X86_FCW_IM)
7522 {
7523 /* Masked underflow - Push QNaN. */
7524 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7525 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7526 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7527 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7528 pFpuCtx->FTW |= RT_BIT(iNewTop);
7529 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7530 iemFpuRotateStackPush(pFpuCtx);
7531 }
7532 else
7533 {
7534 /* Exception pending - don't change TOP or the register stack. */
7535 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7536 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7537 }
7538}
7539
7540
7541DECL_NO_INLINE(IEM_STATIC, void)
7542iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7543{
7544 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7545 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7546
7547 if (pFpuCtx->FCW & X86_FCW_IM)
7548 {
7549 /* Masked underflow - Push QNaN. */
7550 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7551 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7552 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7553 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7554 pFpuCtx->FTW |= RT_BIT(iNewTop);
7555 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7556 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7557 iemFpuRotateStackPush(pFpuCtx);
7558 }
7559 else
7560 {
7561 /* Exception pending - don't change TOP or the register stack. */
7562 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7563 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7564 }
7565}
7566
7567
7568/**
7569 * Worker routine for raising an FPU stack overflow exception on a push.
7570 *
7571 * @param pFpuCtx The FPU context.
7572 */
7573IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7574{
7575 if (pFpuCtx->FCW & X86_FCW_IM)
7576 {
7577 /* Masked overflow. */
7578 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7579 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7580 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7581 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7582 pFpuCtx->FTW |= RT_BIT(iNewTop);
7583 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7584 iemFpuRotateStackPush(pFpuCtx);
7585 }
7586 else
7587 {
7588 /* Exception pending - don't change TOP or the register stack. */
7589 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7590 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7591 }
7592}
7593
7594
7595/**
7596 * Raises a FPU stack overflow exception on a push.
7597 *
7598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7599 */
7600DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7601{
7602 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7603 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7604 iemFpuStackPushOverflowOnly(pFpuCtx);
7605}
7606
7607
7608/**
7609 * Raises a FPU stack overflow exception on a push with a memory operand.
7610 *
7611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7612 * @param iEffSeg The effective memory operand selector register.
7613 * @param GCPtrEff The effective memory operand offset.
7614 */
7615DECL_NO_INLINE(IEM_STATIC, void)
7616iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7617{
7618 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7619 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7620 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7621 iemFpuStackPushOverflowOnly(pFpuCtx);
7622}
7623
7624
7625IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7626{
7627 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7628 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7629 if (pFpuCtx->FTW & RT_BIT(iReg))
7630 return VINF_SUCCESS;
7631 return VERR_NOT_FOUND;
7632}
7633
7634
7635IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7636{
7637 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7638 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7639 if (pFpuCtx->FTW & RT_BIT(iReg))
7640 {
7641 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7642 return VINF_SUCCESS;
7643 }
7644 return VERR_NOT_FOUND;
7645}
7646
7647
7648IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7649 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7650{
7651 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7652 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7653 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7654 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7655 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7656 {
7657 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7658 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7659 return VINF_SUCCESS;
7660 }
7661 return VERR_NOT_FOUND;
7662}
7663
7664
7665IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7666{
7667 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7668 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7669 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7670 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7671 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7672 {
7673 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7674 return VINF_SUCCESS;
7675 }
7676 return VERR_NOT_FOUND;
7677}
7678
7679
7680/**
7681 * Updates the FPU exception status after FCW is changed.
7682 *
7683 * @param pFpuCtx The FPU context.
7684 */
7685IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7686{
7687 uint16_t u16Fsw = pFpuCtx->FSW;
7688 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7689 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7690 else
7691 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7692 pFpuCtx->FSW = u16Fsw;
7693}
7694
7695
7696/**
7697 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7698 *
7699 * @returns The full FTW.
7700 * @param pFpuCtx The FPU context.
7701 */
7702IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7703{
7704 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7705 uint16_t u16Ftw = 0;
7706 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7707 for (unsigned iSt = 0; iSt < 8; iSt++)
7708 {
7709 unsigned const iReg = (iSt + iTop) & 7;
7710 if (!(u8Ftw & RT_BIT(iReg)))
7711 u16Ftw |= 3 << (iReg * 2); /* empty */
7712 else
7713 {
7714 uint16_t uTag;
7715 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7716 if (pr80Reg->s.uExponent == 0x7fff)
7717 uTag = 2; /* Exponent is all 1's => Special. */
7718 else if (pr80Reg->s.uExponent == 0x0000)
7719 {
7720 if (pr80Reg->s.u64Mantissa == 0x0000)
7721 uTag = 1; /* All bits are zero => Zero. */
7722 else
7723 uTag = 2; /* Must be special. */
7724 }
7725 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7726 uTag = 0; /* Valid. */
7727 else
7728 uTag = 2; /* Must be special. */
7729
7730 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7731 }
7732 }
7733
7734 return u16Ftw;
7735}
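
/*
 * Worked examples, not part of the build, of the 2-bit tags produced above
 * (00=valid, 01=zero, 10=special, 11=empty).  The helper mirrors the
 * classification in iemFpuCalcFullFtw for a single register.
 */
#if 0
static uint16_t demoFtwTagOf(bool fEmpty, uint16_t uExponent, uint64_t u64Mantissa)
{
    if (fEmpty)
        return 3;                                   /* 11 - empty */
    if (uExponent == 0x7fff)
        return 2;                                   /* 10 - special (NaN, infinity) */
    if (uExponent == 0)
        return u64Mantissa == 0 ? 1 : 2;            /* 01 - zero, else 10 - (pseudo-)denormal */
    return (u64Mantissa & RT_BIT_64(63)) ? 0 : 2;   /* 00 - valid, else 10 - unnormal */
}
#endif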
7736
7737
7738/**
7739 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7740 *
7741 * @returns The compressed FTW.
7742 * @param u16FullFtw The full FTW to convert.
7743 */
7744IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7745{
7746 uint8_t u8Ftw = 0;
7747 for (unsigned i = 0; i < 8; i++)
7748 {
7749 if ((u16FullFtw & 3) != 3 /*empty*/)
7750 u8Ftw |= RT_BIT(i);
7751 u16FullFtw >>= 2;
7752 }
7753
7754 return u8Ftw;
7755}
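
/*
 * Worked value, not part of the build: a full FTW with R0..R3 valid (tag 00)
 * and R4..R7 empty (tag 11) reads 0xff00 and compresses to the abridged 0x0f.
 */
#if 0
static void demoCompressFtw(void)
{
    Assert(iemFpuCompressFtw(UINT16_C(0xff00)) == UINT16_C(0x0f));
}
#endif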
7756
7757/** @} */
7758
7759
7760/** @name Memory access.
7761 *
7762 * @{
7763 */
7764
7765
7766/**
7767 * Updates the IEMCPU::cbWritten counter if applicable.
7768 *
7769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7770 * @param fAccess The access being accounted for.
7771 * @param cbMem The access size.
7772 */
7773DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7774{
7775 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7776 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7777 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7778}
7779
7780
7781/**
7782 * Checks if the given segment can be written to, raising the appropriate
7783 * exception if not.
7784 *
7785 * @returns VBox strict status code.
7786 *
7787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7788 * @param pHid Pointer to the hidden register.
7789 * @param iSegReg The register number.
7790 * @param pu64BaseAddr Where to return the base address to use for the
7791 * segment. (In 64-bit code it may differ from the
7792 * base in the hidden segment.)
7793 */
7794IEM_STATIC VBOXSTRICTRC
7795iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7796{
7797 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7798
7799 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7800 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7801 else
7802 {
7803 if (!pHid->Attr.n.u1Present)
7804 {
7805 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7806 AssertRelease(uSel == 0);
7807 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7808 return iemRaiseGeneralProtectionFault0(pVCpu);
7809 }
7810
7811 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7812 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7813 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7814 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7815 *pu64BaseAddr = pHid->u64Base;
7816 }
7817 return VINF_SUCCESS;
7818}
7819
7820
7821/**
7822 * Checks if the given segment can be read from, raising the appropriate
7823 * exception if not.
7824 *
7825 * @returns VBox strict status code.
7826 *
7827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7828 * @param pHid Pointer to the hidden register.
7829 * @param iSegReg The register number.
7830 * @param pu64BaseAddr Where to return the base address to use for the
7831 * segment. (In 64-bit code it may differ from the
7832 * base in the hidden segment.)
7833 */
7834IEM_STATIC VBOXSTRICTRC
7835iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7836{
7837 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7838
7839 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7840 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7841 else
7842 {
7843 if (!pHid->Attr.n.u1Present)
7844 {
7845 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7846 AssertRelease(uSel == 0);
7847 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7848 return iemRaiseGeneralProtectionFault0(pVCpu);
7849 }
7850
7851 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7852 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7853 *pu64BaseAddr = pHid->u64Base;
7854 }
7855 return VINF_SUCCESS;
7856}
7857
7858
7859/**
7860 * Applies the segment limit, base and attributes.
7861 *
7862 * This may raise a \#GP or \#SS.
7863 *
7864 * @returns VBox strict status code.
7865 *
7866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7867 * @param fAccess The kind of access which is being performed.
7868 * @param iSegReg The index of the segment register to apply.
7869 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7870 * TSS, ++).
7871 * @param cbMem The access size.
7872 * @param pGCPtrMem Pointer to the guest memory address to apply
7873 * segmentation to. Input and output parameter.
7874 */
7875IEM_STATIC VBOXSTRICTRC
7876iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7877{
7878 if (iSegReg == UINT8_MAX)
7879 return VINF_SUCCESS;
7880
7881 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7882 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7883 switch (pVCpu->iem.s.enmCpuMode)
7884 {
7885 case IEMMODE_16BIT:
7886 case IEMMODE_32BIT:
7887 {
7888 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7889 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7890
7891 if ( pSel->Attr.n.u1Present
7892 && !pSel->Attr.n.u1Unusable)
7893 {
7894 Assert(pSel->Attr.n.u1DescType);
7895 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7896 {
7897 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7898 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7899 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7900
7901 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7902 {
7903 /** @todo CPL check. */
7904 }
7905
7906 /*
7907 * There are two kinds of data selectors, normal and expand down.
7908 */
7909 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7910 {
7911 if ( GCPtrFirst32 > pSel->u32Limit
7912 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7913 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7914 }
7915 else
7916 {
7917 /*
7918 * The upper boundary is defined by the B bit, not the G bit!
7919 */
7920 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7921 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7922 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7923 }
7924 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7925 }
7926 else
7927 {
7928
7929 /*
7930             * Code selectors can usually be used to read through; writing is
7931             * only permitted in real and V8086 mode.
7932 */
7933 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7934 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7935 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7936 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7937 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7938
7939 if ( GCPtrFirst32 > pSel->u32Limit
7940 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7941 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7942
7943 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7944 {
7945 /** @todo CPL check. */
7946 }
7947
7948 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7949 }
7950 }
7951 else
7952 return iemRaiseGeneralProtectionFault0(pVCpu);
7953 return VINF_SUCCESS;
7954 }
7955
7956 case IEMMODE_64BIT:
7957 {
7958 RTGCPTR GCPtrMem = *pGCPtrMem;
7959 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7960 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7961
7962 Assert(cbMem >= 1);
7963 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7964 return VINF_SUCCESS;
7965 return iemRaiseGeneralProtectionFault0(pVCpu);
7966 }
7967
7968 default:
7969 AssertFailedReturn(VERR_IEM_IPE_7);
7970 }
7971}
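
/*
 * A small sketch of the 16/32-bit limit handling above (assumed values: an
 * expand-up DS with base 0 and u32Limit 0xffff in protected mode):
 *
 *     RTGCPTR GCPtrMem = 0xfffc;
 *     rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS, 4, &GCPtrMem);
 *     // 0xfffc..0xffff lies within the limit -> VINF_SUCCESS, base added to GCPtrMem.
 *
 *     GCPtrMem = 0xfffd;
 *     rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS, 4, &GCPtrMem);
 *     // 0xfffd..0x10000 crosses the limit -> #GP via iemRaiseSelectorBounds.
 *
 * For an expand-down data segment the test is inverted: only offsets above
 * u32Limit (up to 0xffff or 0xffffffff depending on the B bit) are accepted.
 */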
7972
7973
7974/**
7975 * Translates a virtual address to a physical address and checks if we
7976 * can access the page as specified.
7977 *
7978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7979 * @param GCPtrMem The virtual address.
7980 * @param fAccess The intended access.
7981 * @param pGCPhysMem Where to return the physical address.
7982 */
7983IEM_STATIC VBOXSTRICTRC
7984iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7985{
7986 /** @todo Need a different PGM interface here. We're currently using
7987 * generic / REM interfaces. this won't cut it for R0 & RC. */
7988 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
7989 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
7990 RTGCPHYS GCPhys;
7991 uint64_t fFlags;
7992 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7993 if (RT_FAILURE(rc))
7994 {
7995 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7996 /** @todo Check unassigned memory in unpaged mode. */
7997 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7998 *pGCPhysMem = NIL_RTGCPHYS;
7999 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8000 }
8001
8002 /* If the page is writable and does not have the no-exec bit set, all
8003 access is allowed. Otherwise we'll have to check more carefully... */
8004 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8005 {
8006 /* Write to read only memory? */
8007 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8008 && !(fFlags & X86_PTE_RW)
8009 && ( (pVCpu->iem.s.uCpl == 3
8010 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8011 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8012 {
8013 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8014 *pGCPhysMem = NIL_RTGCPHYS;
8015 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8016 }
8017
8018 /* Kernel memory accessed by userland? */
8019 if ( !(fFlags & X86_PTE_US)
8020 && pVCpu->iem.s.uCpl == 3
8021 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8022 {
8023 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8024 *pGCPhysMem = NIL_RTGCPHYS;
8025 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8026 }
8027
8028 /* Executing non-executable memory? */
8029 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8030 && (fFlags & X86_PTE_PAE_NX)
8031 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8032 {
8033 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8034 *pGCPhysMem = NIL_RTGCPHYS;
8035 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8036 VERR_ACCESS_DENIED);
8037 }
8038 }
8039
8040 /*
8041 * Set the dirty / access flags.
8042     * ASSUMES this is set when the address is translated rather than on commit...
8043 */
8044 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8045 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8046 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8047 {
8048 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8049 AssertRC(rc2);
8050 }
8051
8052 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8053 *pGCPhysMem = GCPhys;
8054 return VINF_SUCCESS;
8055}
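
/*
 * Rough outcome summary for the permission checks above (assuming CR0.WP=1
 * and EFER.NXE=1):
 *
 *     write access, PTE lacks RW                   -> #PF
 *     CPL=3 non-system access, PTE lacks US        -> #PF
 *     exec access, PTE has the NX bit              -> #PF
 *     otherwise                                    -> A (and D for writes) set,
 *                                                     GCPhys returned.
 *
 * With CR0.WP clear, ring-0..2 writes to read-only pages are let through, and
 * IEM_ACCESS_WHAT_SYS accesses skip the user/supervisor check, matching the
 * conditions in the code above.
 */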
8056
8057
8058
8059/**
8060 * Maps a physical page.
8061 *
8062 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8064 * @param GCPhysMem The physical address.
8065 * @param fAccess The intended access.
8066 * @param ppvMem Where to return the mapping address.
8067 * @param pLock The PGM lock.
8068 */
8069IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8070{
8071#ifdef IEM_LOG_MEMORY_WRITES
8072 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8073 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8074#endif
8075
8076 /** @todo This API may require some improving later. A private deal with PGM
8077     *        regarding locking and unlocking needs to be struck. A couple of TLBs
8078 * living in PGM, but with publicly accessible inlined access methods
8079 * could perhaps be an even better solution. */
8080 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8081 GCPhysMem,
8082 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8083 pVCpu->iem.s.fBypassHandlers,
8084 ppvMem,
8085 pLock);
8086 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8087 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8088
8089 return rc;
8090}
8091
8092
8093/**
8094 * Unmaps a page previously mapped by iemMemPageMap.
8095 *
8096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8097 * @param GCPhysMem The physical address.
8098 * @param fAccess The intended access.
8099 * @param pvMem What iemMemPageMap returned.
8100 * @param pLock The PGM lock.
8101 */
8102DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8103{
8104 NOREF(pVCpu);
8105 NOREF(GCPhysMem);
8106 NOREF(fAccess);
8107 NOREF(pvMem);
8108 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8109}
8110
8111
8112/**
8113 * Looks up a memory mapping entry.
8114 *
8115 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8118 * @param pvMem The memory address.
 * @param fAccess The kind of access to look up.
8119 */
8120DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8121{
8122 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8123 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8124 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8125 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8126 return 0;
8127 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8128 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8129 return 1;
8130 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8131 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8132 return 2;
8133 return VERR_NOT_FOUND;
8134}
8135
8136
8137/**
8138 * Finds a free memmap entry when using iNextMapping doesn't work.
8139 *
8140 * @returns Memory mapping index, 1024 on failure.
8141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8142 */
8143IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8144{
8145 /*
8146 * The easy case.
8147 */
8148 if (pVCpu->iem.s.cActiveMappings == 0)
8149 {
8150 pVCpu->iem.s.iNextMapping = 1;
8151 return 0;
8152 }
8153
8154 /* There should be enough mappings for all instructions. */
8155 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8156
8157 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8158 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8159 return i;
8160
8161 AssertFailedReturn(1024);
8162}
8163
8164
8165/**
8166 * Commits a bounce buffer that needs writing back and unmaps it.
8167 *
8168 * @returns Strict VBox status code.
8169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8170 * @param iMemMap The index of the buffer to commit.
8171 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8172 * Always false in ring-3, obviously.
8173 */
8174IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8175{
8176 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8177 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8178#ifdef IN_RING3
8179 Assert(!fPostponeFail);
8180 RT_NOREF_PV(fPostponeFail);
8181#endif
8182
8183 /*
8184 * Do the writing.
8185 */
8186 PVM pVM = pVCpu->CTX_SUFF(pVM);
8187 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8188 {
8189 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8190 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8191 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8192 if (!pVCpu->iem.s.fBypassHandlers)
8193 {
8194 /*
8195 * Carefully and efficiently dealing with access handler return
8196             * codes makes this a little bloated.
8197 */
8198 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8199 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8200 pbBuf,
8201 cbFirst,
8202 PGMACCESSORIGIN_IEM);
8203 if (rcStrict == VINF_SUCCESS)
8204 {
8205 if (cbSecond)
8206 {
8207 rcStrict = PGMPhysWrite(pVM,
8208 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8209 pbBuf + cbFirst,
8210 cbSecond,
8211 PGMACCESSORIGIN_IEM);
8212 if (rcStrict == VINF_SUCCESS)
8213 { /* nothing */ }
8214 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8215 {
8216 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8217 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8218 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8219 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8220 }
8221#ifndef IN_RING3
8222 else if (fPostponeFail)
8223 {
8224 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8225 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8226 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8227 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8228 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8229 return iemSetPassUpStatus(pVCpu, rcStrict);
8230 }
8231#endif
8232 else
8233 {
8234 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8235 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8236 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8237 return rcStrict;
8238 }
8239 }
8240 }
8241 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8242 {
8243 if (!cbSecond)
8244 {
8245 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8246 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8247 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8248 }
8249 else
8250 {
8251 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8252 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8253 pbBuf + cbFirst,
8254 cbSecond,
8255 PGMACCESSORIGIN_IEM);
8256 if (rcStrict2 == VINF_SUCCESS)
8257 {
8258 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8259 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8260 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8261 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8262 }
8263 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8264 {
8265 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8266 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8267 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8268 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8269 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8270 }
8271#ifndef IN_RING3
8272 else if (fPostponeFail)
8273 {
8274 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8275 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8276 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8277 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8278 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8279 return iemSetPassUpStatus(pVCpu, rcStrict);
8280 }
8281#endif
8282 else
8283 {
8284 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8285 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8286 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8287 return rcStrict2;
8288 }
8289 }
8290 }
8291#ifndef IN_RING3
8292 else if (fPostponeFail)
8293 {
8294 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8295 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8296 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8297 if (!cbSecond)
8298 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8299 else
8300 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8301 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8302 return iemSetPassUpStatus(pVCpu, rcStrict);
8303 }
8304#endif
8305 else
8306 {
8307 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8308 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8309 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8310 return rcStrict;
8311 }
8312 }
8313 else
8314 {
8315 /*
8316 * No access handlers, much simpler.
8317 */
8318 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8319 if (RT_SUCCESS(rc))
8320 {
8321 if (cbSecond)
8322 {
8323 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8324 if (RT_SUCCESS(rc))
8325 { /* likely */ }
8326 else
8327 {
8328 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8329 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8330 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8331 return rc;
8332 }
8333 }
8334 }
8335 else
8336 {
8337 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8338 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8339 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8340 return rc;
8341 }
8342 }
8343 }
8344
8345#if defined(IEM_LOG_MEMORY_WRITES)
8346 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8347 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8348 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8349 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8350 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8351 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8352
8353 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8354 g_cbIemWrote = cbWrote;
8355 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8356#endif
8357
8358 /*
8359 * Free the mapping entry.
8360 */
8361 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8362 Assert(pVCpu->iem.s.cActiveMappings != 0);
8363 pVCpu->iem.s.cActiveMappings--;
8364 return VINF_SUCCESS;
8365}
8366
8367
8368/**
8369 * iemMemMap worker that deals with a request crossing pages.
8370 */
8371IEM_STATIC VBOXSTRICTRC
8372iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8373{
8374 /*
8375 * Do the address translations.
8376 */
8377 RTGCPHYS GCPhysFirst;
8378 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8379 if (rcStrict != VINF_SUCCESS)
8380 return rcStrict;
8381
8382 RTGCPHYS GCPhysSecond;
8383 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8384 fAccess, &GCPhysSecond);
8385 if (rcStrict != VINF_SUCCESS)
8386 return rcStrict;
8387 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8388
8389 PVM pVM = pVCpu->CTX_SUFF(pVM);
8390
8391 /*
8392 * Read in the current memory content if it's a read, execute or partial
8393 * write access.
8394 */
8395 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8396 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8397 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8398
8399 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8400 {
8401 if (!pVCpu->iem.s.fBypassHandlers)
8402 {
8403 /*
8404 * Must carefully deal with access handler status codes here,
8405             * which makes the code a bit bloated.
8406 */
8407 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8408 if (rcStrict == VINF_SUCCESS)
8409 {
8410 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8411 if (rcStrict == VINF_SUCCESS)
8412 { /*likely */ }
8413 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8414 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8415 else
8416 {
8417 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8418 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8419 return rcStrict;
8420 }
8421 }
8422 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8423 {
8424 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8425 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8426 {
8427 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8428 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8429 }
8430 else
8431 {
8432 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8433                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8434 return rcStrict2;
8435 }
8436 }
8437 else
8438 {
8439 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8440 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8441 return rcStrict;
8442 }
8443 }
8444 else
8445 {
8446 /*
8447             * No informational status codes here, much more straightforward.
8448 */
8449 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8450 if (RT_SUCCESS(rc))
8451 {
8452 Assert(rc == VINF_SUCCESS);
8453 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8454 if (RT_SUCCESS(rc))
8455 Assert(rc == VINF_SUCCESS);
8456 else
8457 {
8458 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8459 return rc;
8460 }
8461 }
8462 else
8463 {
8464 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8465 return rc;
8466 }
8467 }
8468 }
8469#ifdef VBOX_STRICT
8470 else
8471 memset(pbBuf, 0xcc, cbMem);
8472 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8473 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8474#endif
8475
8476 /*
8477 * Commit the bounce buffer entry.
8478 */
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8480 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8481 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8482 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8483 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8484 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8485 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8486 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8487 pVCpu->iem.s.cActiveMappings++;
8488
8489 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8490 *ppvMem = pbBuf;
8491 return VINF_SUCCESS;
8492}
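
/*
 * A quick numeric sketch of the split above (illustrative addresses): an
 * 8 byte access at GCPtrFirst=0x00001ffd crosses the page boundary, so
 *
 *     cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 0x1000 - 0xffd = 3
 *     cbSecondPage = cbMem - cbFirstPage                          = 8 - 3          = 5
 *
 * Both pieces are gathered into aBounceBuffers[iMemMap] so the caller sees one
 * contiguous 8 byte buffer; iemMemBounceBufferCommitAndUnmap splits the write
 * back the same way.
 */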
8493
8494
8495/**
8496 * iemMemMap worker that deals with iemMemPageMap failures.
8497 */
8498IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8499 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8500{
8501 /*
8502 * Filter out conditions we can handle and the ones which shouldn't happen.
8503 */
8504 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8505 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8506 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8507 {
8508 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8509 return rcMap;
8510 }
8511 pVCpu->iem.s.cPotentialExits++;
8512
8513 /*
8514 * Read in the current memory content if it's a read, execute or partial
8515 * write access.
8516 */
8517 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8518 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8519 {
8520 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8521 memset(pbBuf, 0xff, cbMem);
8522 else
8523 {
8524 int rc;
8525 if (!pVCpu->iem.s.fBypassHandlers)
8526 {
8527 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8528 if (rcStrict == VINF_SUCCESS)
8529 { /* nothing */ }
8530 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8531 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8532 else
8533 {
8534 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8535 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8536 return rcStrict;
8537 }
8538 }
8539 else
8540 {
8541 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8542 if (RT_SUCCESS(rc))
8543 { /* likely */ }
8544 else
8545 {
8546 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8547 GCPhysFirst, rc));
8548 return rc;
8549 }
8550 }
8551 }
8552 }
8553#ifdef VBOX_STRICT
8554    else
8555        memset(pbBuf, 0xcc, cbMem);
8558    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8559        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8560#endif
8561
8562 /*
8563 * Commit the bounce buffer entry.
8564 */
8565 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8566 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8567 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8568 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8569 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8570 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8571 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8572 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8573 pVCpu->iem.s.cActiveMappings++;
8574
8575 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8576 *ppvMem = pbBuf;
8577 return VINF_SUCCESS;
8578}
8579
8580
8581
8582/**
8583 * Maps the specified guest memory for the given kind of access.
8584 *
8585 * This may be using bounce buffering of the memory if it's crossing a page
8586 * boundary or if there is an access handler installed for any of it. Because
8587 * of lock prefix guarantees, we're in for some extra clutter when this
8588 * happens.
8589 *
8590 * This may raise a \#GP, \#SS, \#PF or \#AC.
8591 *
8592 * @returns VBox strict status code.
8593 *
8594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8595 * @param ppvMem Where to return the pointer to the mapped
8596 * memory.
8597 * @param cbMem The number of bytes to map. This is usually 1,
8598 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8599 * string operations it can be up to a page.
8600 * @param iSegReg The index of the segment register to use for
8601 * this access. The base and limits are checked.
8602 * Use UINT8_MAX to indicate that no segmentation
8603 * is required (for IDT, GDT and LDT accesses).
8604 * @param GCPtrMem The address of the guest memory.
8605 * @param fAccess How the memory is being accessed. The
8606 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8607 * how to map the memory, while the
8608 * IEM_ACCESS_WHAT_XXX bit is used when raising
8609 * exceptions.
8610 */
8611IEM_STATIC VBOXSTRICTRC
8612iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8613{
8614 /*
8615 * Check the input and figure out which mapping entry to use.
8616 */
8617 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8618    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8619 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8620
8621 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8622 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8623 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8624 {
8625 iMemMap = iemMemMapFindFree(pVCpu);
8626 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8627 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8628 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8629 pVCpu->iem.s.aMemMappings[2].fAccess),
8630 VERR_IEM_IPE_9);
8631 }
8632
8633 /*
8634 * Map the memory, checking that we can actually access it. If something
8635 * slightly complicated happens, fall back on bounce buffering.
8636 */
8637 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8638 if (rcStrict != VINF_SUCCESS)
8639 return rcStrict;
8640
8641 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8642 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8643
8644 RTGCPHYS GCPhysFirst;
8645 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8646 if (rcStrict != VINF_SUCCESS)
8647 return rcStrict;
8648
8649 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8650 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8651 if (fAccess & IEM_ACCESS_TYPE_READ)
8652 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8653
8654 void *pvMem;
8655 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8656 if (rcStrict != VINF_SUCCESS)
8657 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8658
8659 /*
8660 * Fill in the mapping table entry.
8661 */
8662 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8663 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8664 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8665 pVCpu->iem.s.cActiveMappings++;
8666
8667 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8668 *ppvMem = pvMem;
8669 return VINF_SUCCESS;
8670}
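
/*
 * Typical calling pattern (a minimal sketch; the data fetch/store helpers
 * further down are the real users, and pu32Dst/u32Value are made-up names):
 * map, access the returned pointer, then commit & unmap so bounce buffered
 * writes actually reach guest memory.
 *
 *     uint32_t *pu32Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32Dst = u32Value;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *     }
 */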
8671
8672
8673/**
8674 * Commits the guest memory if bounce buffered and unmaps it.
8675 *
8676 * @returns Strict VBox status code.
8677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8678 * @param pvMem The mapping.
8679 * @param fAccess The kind of access.
8680 */
8681IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8682{
8683 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8684 AssertReturn(iMemMap >= 0, iMemMap);
8685
8686 /* If it's bounce buffered, we may need to write back the buffer. */
8687 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8688 {
8689 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8690 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8691 }
8692 /* Otherwise unlock it. */
8693 else
8694 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8695
8696 /* Free the entry. */
8697 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8698 Assert(pVCpu->iem.s.cActiveMappings != 0);
8699 pVCpu->iem.s.cActiveMappings--;
8700 return VINF_SUCCESS;
8701}
8702
8703#ifdef IEM_WITH_SETJMP
8704
8705/**
8706 * Maps the specified guest memory for the given kind of access, longjmp on
8707 * error.
8708 *
8709 * This may be using bounce buffering of the memory if it's crossing a page
8710 * boundary or if there is an access handler installed for any of it. Because
8711 * of lock prefix guarantees, we're in for some extra clutter when this
8712 * happens.
8713 *
8714 * This may raise a \#GP, \#SS, \#PF or \#AC.
8715 *
8716 * @returns Pointer to the mapped memory.
8717 *
8718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8719 * @param cbMem The number of bytes to map. This is usually 1,
8720 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8721 * string operations it can be up to a page.
8722 * @param iSegReg The index of the segment register to use for
8723 * this access. The base and limits are checked.
8724 * Use UINT8_MAX to indicate that no segmentation
8725 * is required (for IDT, GDT and LDT accesses).
8726 * @param GCPtrMem The address of the guest memory.
8727 * @param fAccess How the memory is being accessed. The
8728 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8729 * how to map the memory, while the
8730 * IEM_ACCESS_WHAT_XXX bit is used when raising
8731 * exceptions.
8732 */
8733IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8734{
8735 /*
8736 * Check the input and figure out which mapping entry to use.
8737 */
8738 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8739    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8740 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8741
8742 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8743 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8744 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8745 {
8746 iMemMap = iemMemMapFindFree(pVCpu);
8747 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8748 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8749 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8750 pVCpu->iem.s.aMemMappings[2].fAccess),
8751 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8752 }
8753
8754 /*
8755 * Map the memory, checking that we can actually access it. If something
8756 * slightly complicated happens, fall back on bounce buffering.
8757 */
8758 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8759 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8760 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8761
8762 /* Crossing a page boundary? */
8763 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8764 { /* No (likely). */ }
8765 else
8766 {
8767 void *pvMem;
8768 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8769 if (rcStrict == VINF_SUCCESS)
8770 return pvMem;
8771 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8772 }
8773
8774 RTGCPHYS GCPhysFirst;
8775 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8776 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8777 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8778
8779 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8780 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8781 if (fAccess & IEM_ACCESS_TYPE_READ)
8782 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8783
8784 void *pvMem;
8785 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8786 if (rcStrict == VINF_SUCCESS)
8787 { /* likely */ }
8788 else
8789 {
8790 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8791 if (rcStrict == VINF_SUCCESS)
8792 return pvMem;
8793 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8794 }
8795
8796 /*
8797 * Fill in the mapping table entry.
8798 */
8799 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8800 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8801 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8802 pVCpu->iem.s.cActiveMappings++;
8803
8804 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8805 return pvMem;
8806}
8807
8808
8809/**
8810 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8811 *
8812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8813 * @param pvMem The mapping.
8814 * @param fAccess The kind of access.
8815 */
8816IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8817{
8818 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8819 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8820
8821 /* If it's bounce buffered, we may need to write back the buffer. */
8822 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8823 {
8824 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8825 {
8826 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8827 if (rcStrict == VINF_SUCCESS)
8828 return;
8829 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8830 }
8831 }
8832 /* Otherwise unlock it. */
8833 else
8834 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8835
8836 /* Free the entry. */
8837 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8838 Assert(pVCpu->iem.s.cActiveMappings != 0);
8839 pVCpu->iem.s.cActiveMappings--;
8840}
8841
8842#endif /* IEM_WITH_SETJMP */
8843
8844#ifndef IN_RING3
8845/**
8846 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8847 * buffer part shows trouble, the write back is postponed to ring-3 (sets FF and stuff).
8848 *
8849 * Allows the instruction to be completed and retired, while the IEM user will
8850 * return to ring-3 immediately afterwards and do the postponed writes there.
8851 *
8852 * @returns VBox status code (no strict statuses). Caller must check
8853 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8855 * @param pvMem The mapping.
8856 * @param fAccess The kind of access.
8857 */
8858IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8859{
8860 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8861 AssertReturn(iMemMap >= 0, iMemMap);
8862
8863 /* If it's bounce buffered, we may need to write back the buffer. */
8864 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8865 {
8866 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8867 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8868 }
8869 /* Otherwise unlock it. */
8870 else
8871 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8872
8873 /* Free the entry. */
8874 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8875 Assert(pVCpu->iem.s.cActiveMappings != 0);
8876 pVCpu->iem.s.cActiveMappings--;
8877 return VINF_SUCCESS;
8878}
8879#endif
8880
8881
8882/**
8883 * Rolls back mappings, releasing page locks and such.
8884 *
8885 * The caller shall only call this after checking cActiveMappings.
8886 *
8888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8889 */
8890IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8891{
8892 Assert(pVCpu->iem.s.cActiveMappings > 0);
8893
8894 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8895 while (iMemMap-- > 0)
8896 {
8897 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8898 if (fAccess != IEM_ACCESS_INVALID)
8899 {
8900 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8901 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8902 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8903 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8904 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
8905 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
8906 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
8907 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
8908 pVCpu->iem.s.cActiveMappings--;
8909 }
8910 }
8911}
8912
8913
8914/**
8915 * Fetches a data byte.
8916 *
8917 * @returns Strict VBox status code.
8918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8919 * @param pu8Dst Where to return the byte.
8920 * @param iSegReg The index of the segment register to use for
8921 * this access. The base and limits are checked.
8922 * @param GCPtrMem The address of the guest memory.
8923 */
8924IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8925{
8926 /* The lazy approach for now... */
8927 uint8_t const *pu8Src;
8928 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8929 if (rc == VINF_SUCCESS)
8930 {
8931 *pu8Dst = *pu8Src;
8932 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8933 }
8934 return rc;
8935}
8936
8937
8938#ifdef IEM_WITH_SETJMP
8939/**
8940 * Fetches a data byte, longjmp on error.
8941 *
8942 * @returns The byte.
8943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8944 * @param iSegReg The index of the segment register to use for
8945 * this access. The base and limits are checked.
8946 * @param GCPtrMem The address of the guest memory.
8947 */
8948DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8949{
8950 /* The lazy approach for now... */
8951 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8952 uint8_t const bRet = *pu8Src;
8953 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8954 return bRet;
8955}
8956#endif /* IEM_WITH_SETJMP */
8957
8958
8959/**
8960 * Fetches a data word.
8961 *
8962 * @returns Strict VBox status code.
8963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8964 * @param pu16Dst Where to return the word.
8965 * @param iSegReg The index of the segment register to use for
8966 * this access. The base and limits are checked.
8967 * @param GCPtrMem The address of the guest memory.
8968 */
8969IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8970{
8971 /* The lazy approach for now... */
8972 uint16_t const *pu16Src;
8973 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8974 if (rc == VINF_SUCCESS)
8975 {
8976 *pu16Dst = *pu16Src;
8977 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8978 }
8979 return rc;
8980}
8981
8982
8983#ifdef IEM_WITH_SETJMP
8984/**
8985 * Fetches a data word, longjmp on error.
8986 *
8987 * @returns The word
8988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8989 * @param iSegReg The index of the segment register to use for
8990 * this access. The base and limits are checked.
8991 * @param GCPtrMem The address of the guest memory.
8992 */
8993DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8994{
8995 /* The lazy approach for now... */
8996 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8997 uint16_t const u16Ret = *pu16Src;
8998 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8999 return u16Ret;
9000}
9001#endif
9002
9003
9004/**
9005 * Fetches a data dword.
9006 *
9007 * @returns Strict VBox status code.
9008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9009 * @param pu32Dst Where to return the dword.
9010 * @param iSegReg The index of the segment register to use for
9011 * this access. The base and limits are checked.
9012 * @param GCPtrMem The address of the guest memory.
9013 */
9014IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9015{
9016 /* The lazy approach for now... */
9017 uint32_t const *pu32Src;
9018 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9019 if (rc == VINF_SUCCESS)
9020 {
9021 *pu32Dst = *pu32Src;
9022 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9023 }
9024 return rc;
9025}
9026
9027
9028#ifdef IEM_WITH_SETJMP
9029
9030IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9031{
9032 Assert(cbMem >= 1);
9033 Assert(iSegReg < X86_SREG_COUNT);
9034
9035 /*
9036 * 64-bit mode is simpler.
9037 */
9038 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9039 {
9040 if (iSegReg >= X86_SREG_FS)
9041 {
9042 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9043 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9044 GCPtrMem += pSel->u64Base;
9045 }
9046
9047 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9048 return GCPtrMem;
9049 }
9050 /*
9051 * 16-bit and 32-bit segmentation.
9052 */
9053 else
9054 {
9055 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9056 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9057 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9058 == X86DESCATTR_P /* data, expand up */
9059 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9060 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9061 {
9062 /* expand up */
9063 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9064 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9065 && GCPtrLast32 > (uint32_t)GCPtrMem))
9066 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9067 }
9068 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9069 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9070 {
9071 /* expand down */
9072 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9073 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9074 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9075 && GCPtrLast32 > (uint32_t)GCPtrMem))
9076 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9077 }
9078 else
9079 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9080 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9081 }
9082 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9083}
9084
9085
9086IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9087{
9088 Assert(cbMem >= 1);
9089 Assert(iSegReg < X86_SREG_COUNT);
9090
9091 /*
9092 * 64-bit mode is simpler.
9093 */
9094 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9095 {
9096 if (iSegReg >= X86_SREG_FS)
9097 {
9098 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9099 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9100 GCPtrMem += pSel->u64Base;
9101 }
9102
9103 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9104 return GCPtrMem;
9105 }
9106 /*
9107 * 16-bit and 32-bit segmentation.
9108 */
9109 else
9110 {
9111 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9112 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9113 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9114 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9115 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9116 {
9117 /* expand up */
9118 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9119 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9120 && GCPtrLast32 > (uint32_t)GCPtrMem))
9121 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9122 }
9123        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9124 {
9125 /* expand down */
9126 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9127 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9128 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9129 && GCPtrLast32 > (uint32_t)GCPtrMem))
9130 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9131 }
9132 else
9133 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9134 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9135 }
9136 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9137}
9138
9139
9140/**
9141 * Fetches a data dword, longjmp on error, fallback/safe version.
9142 *
9143 * @returns The dword
9144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9145 * @param iSegReg The index of the segment register to use for
9146 * this access. The base and limits are checked.
9147 * @param GCPtrMem The address of the guest memory.
9148 */
9149IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9150{
9151 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9152 uint32_t const u32Ret = *pu32Src;
9153 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9154 return u32Ret;
9155}
9156
9157
9158/**
9159 * Fetches a data dword, longjmp on error.
9160 *
9161 * @returns The dword
9162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9163 * @param iSegReg The index of the segment register to use for
9164 * this access. The base and limits are checked.
9165 * @param GCPtrMem The address of the guest memory.
9166 */
9167DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9168{
9169# ifdef IEM_WITH_DATA_TLB
9170 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9171 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9172 {
9173 /// @todo more later.
9174 }
9175
9176 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9177# else
9178 /* The lazy approach. */
9179 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9180 uint32_t const u32Ret = *pu32Src;
9181 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9182 return u32Ret;
9183# endif
9184}
9185#endif
9186
9187
9188#ifdef SOME_UNUSED_FUNCTION
9189/**
9190 * Fetches a data dword and sign extends it to a qword.
9191 *
9192 * @returns Strict VBox status code.
9193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9194 * @param pu64Dst Where to return the sign extended value.
9195 * @param iSegReg The index of the segment register to use for
9196 * this access. The base and limits are checked.
9197 * @param GCPtrMem The address of the guest memory.
9198 */
9199IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9200{
9201 /* The lazy approach for now... */
9202 int32_t const *pi32Src;
9203 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9204 if (rc == VINF_SUCCESS)
9205 {
9206 *pu64Dst = *pi32Src;
9207 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9208 }
9209#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9210 else
9211 *pu64Dst = 0;
9212#endif
9213 return rc;
9214}
9215#endif
9216
9217
9218/**
9219 * Fetches a data qword.
9220 *
9221 * @returns Strict VBox status code.
9222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9223 * @param pu64Dst Where to return the qword.
9224 * @param iSegReg The index of the segment register to use for
9225 * this access. The base and limits are checked.
9226 * @param GCPtrMem The address of the guest memory.
9227 */
9228IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9229{
9230 /* The lazy approach for now... */
9231 uint64_t const *pu64Src;
9232 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9233 if (rc == VINF_SUCCESS)
9234 {
9235 *pu64Dst = *pu64Src;
9236 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9237 }
9238 return rc;
9239}
9240
9241
9242#ifdef IEM_WITH_SETJMP
9243/**
9244 * Fetches a data qword, longjmp on error.
9245 *
9246 * @returns The qword.
9247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9248 * @param iSegReg The index of the segment register to use for
9249 * this access. The base and limits are checked.
9250 * @param GCPtrMem The address of the guest memory.
9251 */
9252DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9253{
9254 /* The lazy approach for now... */
9255 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9256 uint64_t const u64Ret = *pu64Src;
9257 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9258 return u64Ret;
9259}
9260#endif
9261
9262
9263/**
9264 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9265 *
9266 * @returns Strict VBox status code.
9267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9268 * @param pu64Dst Where to return the qword.
9269 * @param iSegReg The index of the segment register to use for
9270 * this access. The base and limits are checked.
9271 * @param GCPtrMem The address of the guest memory.
9272 */
9273IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9274{
9275 /* The lazy approach for now... */
9276 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9277 if (RT_UNLIKELY(GCPtrMem & 15))
9278 return iemRaiseGeneralProtectionFault0(pVCpu);
9279
9280 uint64_t const *pu64Src;
9281 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9282 if (rc == VINF_SUCCESS)
9283 {
9284 *pu64Dst = *pu64Src;
9285 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9286 }
9287 return rc;
9288}
9289
9290
9291#ifdef IEM_WITH_SETJMP
9292/**
9293 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9294 *
9295 * @returns The qword.
9296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9297 * @param iSegReg The index of the segment register to use for
9298 * this access. The base and limits are checked.
9299 * @param GCPtrMem The address of the guest memory.
9300 */
9301DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9302{
9303 /* The lazy approach for now... */
9304 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9305 if (RT_LIKELY(!(GCPtrMem & 15)))
9306 {
9307 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9308 uint64_t const u64Ret = *pu64Src;
9309 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9310 return u64Ret;
9311 }
9312
9313 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9314 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9315}
9316#endif
9317
9318
9319/**
9320 * Fetches a data tword.
9321 *
9322 * @returns Strict VBox status code.
9323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9324 * @param pr80Dst Where to return the tword.
9325 * @param iSegReg The index of the segment register to use for
9326 * this access. The base and limits are checked.
9327 * @param GCPtrMem The address of the guest memory.
9328 */
9329IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9330{
9331 /* The lazy approach for now... */
9332 PCRTFLOAT80U pr80Src;
9333 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9334 if (rc == VINF_SUCCESS)
9335 {
9336 *pr80Dst = *pr80Src;
9337 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9338 }
9339 return rc;
9340}
9341
9342
9343#ifdef IEM_WITH_SETJMP
9344/**
9345 * Fetches a data tword, longjmp on error.
9346 *
9347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9348 * @param pr80Dst Where to return the tword.
9349 * @param iSegReg The index of the segment register to use for
9350 * this access. The base and limits are checked.
9351 * @param GCPtrMem The address of the guest memory.
9352 */
9353DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9354{
9355 /* The lazy approach for now... */
9356 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9357 *pr80Dst = *pr80Src;
9358 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9359}
9360#endif
9361
9362
9363/**
9364 * Fetches a data dqword (double qword), generally SSE related.
9365 *
9366 * @returns Strict VBox status code.
9367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9368 * @param pu128Dst Where to return the dqword.
9369 * @param iSegReg The index of the segment register to use for
9370 * this access. The base and limits are checked.
9371 * @param GCPtrMem The address of the guest memory.
9372 */
9373IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9374{
9375 /* The lazy approach for now... */
9376 PCRTUINT128U pu128Src;
9377 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9378 if (rc == VINF_SUCCESS)
9379 {
9380 pu128Dst->au64[0] = pu128Src->au64[0];
9381 pu128Dst->au64[1] = pu128Src->au64[1];
9382 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9383 }
9384 return rc;
9385}
9386
9387
9388#ifdef IEM_WITH_SETJMP
9389/**
9390 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9391 *
9392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9393 * @param pu128Dst Where to return the dqword.
9394 * @param iSegReg The index of the segment register to use for
9395 * this access. The base and limits are checked.
9396 * @param GCPtrMem The address of the guest memory.
9397 */
9398IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9399{
9400 /* The lazy approach for now... */
9401 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9402 pu128Dst->au64[0] = pu128Src->au64[0];
9403 pu128Dst->au64[1] = pu128Src->au64[1];
9404 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9405}
9406#endif
9407
9408
9409/**
9410 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9411 * related.
9412 *
9413 * Raises \#GP(0) if not aligned.
9414 *
9415 * @returns Strict VBox status code.
9416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9417 * @param pu128Dst Where to return the dqword.
9418 * @param iSegReg The index of the segment register to use for
9419 * this access. The base and limits are checked.
9420 * @param GCPtrMem The address of the guest memory.
9421 */
9422IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9423{
9424 /* The lazy approach for now... */
9425 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9426 if ( (GCPtrMem & 15)
9427 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9428 return iemRaiseGeneralProtectionFault0(pVCpu);
9429
9430 PCRTUINT128U pu128Src;
9431 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9432 if (rc == VINF_SUCCESS)
9433 {
9434 pu128Dst->au64[0] = pu128Src->au64[0];
9435 pu128Dst->au64[1] = pu128Src->au64[1];
9436 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9437 }
9438 return rc;
9439}
9440
9441
9442#ifdef IEM_WITH_SETJMP
9443/**
9444 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9445 * related, longjmp on error.
9446 *
9447 * Raises \#GP(0) if not aligned.
9448 *
9449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9450 * @param pu128Dst Where to return the dqword.
9451 * @param iSegReg The index of the segment register to use for
9452 * this access. The base and limits are checked.
9453 * @param GCPtrMem The address of the guest memory.
9454 */
9455DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9456{
9457 /* The lazy approach for now... */
9458 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9459 if ( (GCPtrMem & 15) == 0
9460 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9461 {
9462 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9463 pu128Dst->au64[0] = pu128Src->au64[0];
9464 pu128Dst->au64[1] = pu128Src->au64[1];
9465 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9466 return;
9467 }
9468
9469 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9470 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9471}
9472#endif
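
/*
 * Note on the alignment check above (illustrative): (GCPtrMem & 15) is non-zero
 * for any address that is not a multiple of 16, so e.g. 0x1008 faults while
 * 0x1010 passes.  The check is skipped when MXCSR.MM is set (the misaligned SSE
 * mode found on some AMD CPUs), which is what the second half of the condition
 * tests:
 *
 *      bool const fMisalignedOk = (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM) != 0;
 */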
9473
9474
9475/**
9476 * Fetches a data oword (octo word), generally AVX related.
9477 *
9478 * @returns Strict VBox status code.
9479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9480 * @param pu256Dst Where to return the oword.
9481 * @param iSegReg The index of the segment register to use for
9482 * this access. The base and limits are checked.
9483 * @param GCPtrMem The address of the guest memory.
9484 */
9485IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9486{
9487 /* The lazy approach for now... */
9488 PCRTUINT256U pu256Src;
9489 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9490 if (rc == VINF_SUCCESS)
9491 {
9492 pu256Dst->au64[0] = pu256Src->au64[0];
9493 pu256Dst->au64[1] = pu256Src->au64[1];
9494 pu256Dst->au64[2] = pu256Src->au64[2];
9495 pu256Dst->au64[3] = pu256Src->au64[3];
9496 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9497 }
9498 return rc;
9499}
9500
9501
9502#ifdef IEM_WITH_SETJMP
9503/**
9504 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9505 *
9506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9507 * @param pu256Dst Where to return the oword.
9508 * @param iSegReg The index of the segment register to use for
9509 * this access. The base and limits are checked.
9510 * @param GCPtrMem The address of the guest memory.
9511 */
9512IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9513{
9514 /* The lazy approach for now... */
9515 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9516 pu256Dst->au64[0] = pu256Src->au64[0];
9517 pu256Dst->au64[1] = pu256Src->au64[1];
9518 pu256Dst->au64[2] = pu256Src->au64[2];
9519 pu256Dst->au64[3] = pu256Src->au64[3];
9520 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9521}
9522#endif
9523
9524
9525/**
9526 * Fetches a data oword (octo word) at an aligned address, generally AVX
9527 * related.
9528 *
9529 * Raises \#GP(0) if not aligned.
9530 *
9531 * @returns Strict VBox status code.
9532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9533 * @param pu256Dst Where to return the oword.
9534 * @param iSegReg The index of the segment register to use for
9535 * this access. The base and limits are checked.
9536 * @param GCPtrMem The address of the guest memory.
9537 */
9538IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9539{
9540 /* The lazy approach for now... */
9541 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9542 if (GCPtrMem & 31)
9543 return iemRaiseGeneralProtectionFault0(pVCpu);
9544
9545 PCRTUINT256U pu256Src;
9546 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9547 if (rc == VINF_SUCCESS)
9548 {
9549 pu256Dst->au64[0] = pu256Src->au64[0];
9550 pu256Dst->au64[1] = pu256Src->au64[1];
9551 pu256Dst->au64[2] = pu256Src->au64[2];
9552 pu256Dst->au64[3] = pu256Src->au64[3];
9553 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9554 }
9555 return rc;
9556}
9557
9558
9559#ifdef IEM_WITH_SETJMP
9560/**
9561 * Fetches a data oword (octo word) at an aligned address, generally AVX
9562 * related, longjmp on error.
9563 *
9564 * Raises \#GP(0) if not aligned.
9565 *
9566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9567 * @param pu256Dst Where to return the oword.
9568 * @param iSegReg The index of the segment register to use for
9569 * this access. The base and limits are checked.
9570 * @param GCPtrMem The address of the guest memory.
9571 */
9572DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9573{
9574 /* The lazy approach for now... */
9575 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9576 if ((GCPtrMem & 31) == 0)
9577 {
9578 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9579 pu256Dst->au64[0] = pu256Src->au64[0];
9580 pu256Dst->au64[1] = pu256Src->au64[1];
9581 pu256Dst->au64[2] = pu256Src->au64[2];
9582 pu256Dst->au64[3] = pu256Src->au64[3];
9583 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9584 return;
9585 }
9586
9587 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9588 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9589}
9590#endif
9591
9592
9593
9594/**
9595 * Fetches a descriptor register (lgdt, lidt).
9596 *
9597 * @returns Strict VBox status code.
9598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9599 * @param pcbLimit Where to return the limit.
9600 * @param pGCPtrBase Where to return the base.
9601 * @param iSegReg The index of the segment register to use for
9602 * this access. The base and limits are checked.
9603 * @param GCPtrMem The address of the guest memory.
9604 * @param enmOpSize The effective operand size.
9605 */
9606IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9607 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9608{
9609 /*
9610 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9611 * little special:
9612 * - The two reads are done separately.
9613 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9614 * - We suspect the 386 to actually commit the limit before the base in
9615 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9616 * don't try to emulate this eccentric behavior, because it's not well
9617 * enough understood and rather hard to trigger.
9618 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9619 */
9620 VBOXSTRICTRC rcStrict;
9621 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9622 {
9623 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9624 if (rcStrict == VINF_SUCCESS)
9625 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9626 }
9627 else
9628 {
9629 uint32_t uTmp = 0; /* (Silences a possible Visual C++ used-uninitialized warning.) */
9630 if (enmOpSize == IEMMODE_32BIT)
9631 {
9632 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9633 {
9634 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9635 if (rcStrict == VINF_SUCCESS)
9636 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9637 }
9638 else
9639 {
9640 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9641 if (rcStrict == VINF_SUCCESS)
9642 {
9643 *pcbLimit = (uint16_t)uTmp;
9644 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9645 }
9646 }
9647 if (rcStrict == VINF_SUCCESS)
9648 *pGCPtrBase = uTmp;
9649 }
9650 else
9651 {
9652 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9653 if (rcStrict == VINF_SUCCESS)
9654 {
9655 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9656 if (rcStrict == VINF_SUCCESS)
9657 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9658 }
9659 }
9660 }
9661 return rcStrict;
9662}
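
/*
 * Memory operand layout read by the function above (illustrative summary of
 * the code, little endian):
 *
 *      offset +0:  16-bit limit
 *      offset +2:  base address
 *                  - 64-bit mode:          full 64-bit base
 *                  - 32-bit operand size:  full 32-bit base
 *                  - 16-bit operand size:  only the low 24 bits are used, the
 *                                          top byte is masked off (the
 *                                          UINT32_C(0x00ffffff) mask above).
 */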
9663
9664
9665
9666/**
9667 * Stores a data byte.
9668 *
9669 * @returns Strict VBox status code.
9670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9671 * @param iSegReg The index of the segment register to use for
9672 * this access. The base and limits are checked.
9673 * @param GCPtrMem The address of the guest memory.
9674 * @param u8Value The value to store.
9675 */
9676IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9677{
9678 /* The lazy approach for now... */
9679 uint8_t *pu8Dst;
9680 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9681 if (rc == VINF_SUCCESS)
9682 {
9683 *pu8Dst = u8Value;
9684 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9685 }
9686 return rc;
9687}
9688
9689
9690#ifdef IEM_WITH_SETJMP
9691/**
9692 * Stores a data byte, longjmp on error.
9693 *
9694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9695 * @param iSegReg The index of the segment register to use for
9696 * this access. The base and limits are checked.
9697 * @param GCPtrMem The address of the guest memory.
9698 * @param u8Value The value to store.
9699 */
9700IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9701{
9702 /* The lazy approach for now... */
9703 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9704 *pu8Dst = u8Value;
9705 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9706}
9707#endif
9708
9709
9710/**
9711 * Stores a data word.
9712 *
9713 * @returns Strict VBox status code.
9714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9715 * @param iSegReg The index of the segment register to use for
9716 * this access. The base and limits are checked.
9717 * @param GCPtrMem The address of the guest memory.
9718 * @param u16Value The value to store.
9719 */
9720IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9721{
9722 /* The lazy approach for now... */
9723 uint16_t *pu16Dst;
9724 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9725 if (rc == VINF_SUCCESS)
9726 {
9727 *pu16Dst = u16Value;
9728 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9729 }
9730 return rc;
9731}
9732
9733
9734#ifdef IEM_WITH_SETJMP
9735/**
9736 * Stores a data word, longjmp on error.
9737 *
9738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9739 * @param iSegReg The index of the segment register to use for
9740 * this access. The base and limits are checked.
9741 * @param GCPtrMem The address of the guest memory.
9742 * @param u16Value The value to store.
9743 */
9744IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9745{
9746 /* The lazy approach for now... */
9747 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9748 *pu16Dst = u16Value;
9749 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9750}
9751#endif
9752
9753
9754/**
9755 * Stores a data dword.
9756 *
9757 * @returns Strict VBox status code.
9758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9759 * @param iSegReg The index of the segment register to use for
9760 * this access. The base and limits are checked.
9761 * @param GCPtrMem The address of the guest memory.
9762 * @param u32Value The value to store.
9763 */
9764IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9765{
9766 /* The lazy approach for now... */
9767 uint32_t *pu32Dst;
9768 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9769 if (rc == VINF_SUCCESS)
9770 {
9771 *pu32Dst = u32Value;
9772 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9773 }
9774 return rc;
9775}
9776
9777
9778#ifdef IEM_WITH_SETJMP
9779/**
9780 * Stores a data dword, longjmp on error.
9781 *
9782 * @returns Strict VBox status code.
9783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9784 * @param iSegReg The index of the segment register to use for
9785 * this access. The base and limits are checked.
9786 * @param GCPtrMem The address of the guest memory.
9787 * @param u32Value The value to store.
9788 */
9789IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9790{
9791 /* The lazy approach for now... */
9792 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9793 *pu32Dst = u32Value;
9794 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9795}
9796#endif
9797
9798
9799/**
9800 * Stores a data qword.
9801 *
9802 * @returns Strict VBox status code.
9803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9804 * @param iSegReg The index of the segment register to use for
9805 * this access. The base and limits are checked.
9806 * @param GCPtrMem The address of the guest memory.
9807 * @param u64Value The value to store.
9808 */
9809IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9810{
9811 /* The lazy approach for now... */
9812 uint64_t *pu64Dst;
9813 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9814 if (rc == VINF_SUCCESS)
9815 {
9816 *pu64Dst = u64Value;
9817 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9818 }
9819 return rc;
9820}
9821
9822
9823#ifdef IEM_WITH_SETJMP
9824/**
9825 * Stores a data qword, longjmp on error.
9826 *
9827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9828 * @param iSegReg The index of the segment register to use for
9829 * this access. The base and limits are checked.
9830 * @param GCPtrMem The address of the guest memory.
9831 * @param u64Value The value to store.
9832 */
9833IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9834{
9835 /* The lazy approach for now... */
9836 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9837 *pu64Dst = u64Value;
9838 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9839}
9840#endif
9841
9842
9843/**
9844 * Stores a data dqword.
9845 *
9846 * @returns Strict VBox status code.
9847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9848 * @param iSegReg The index of the segment register to use for
9849 * this access. The base and limits are checked.
9850 * @param GCPtrMem The address of the guest memory.
9851 * @param u128Value The value to store.
9852 */
9853IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9854{
9855 /* The lazy approach for now... */
9856 PRTUINT128U pu128Dst;
9857 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9858 if (rc == VINF_SUCCESS)
9859 {
9860 pu128Dst->au64[0] = u128Value.au64[0];
9861 pu128Dst->au64[1] = u128Value.au64[1];
9862 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9863 }
9864 return rc;
9865}
9866
9867
9868#ifdef IEM_WITH_SETJMP
9869/**
9870 * Stores a data dqword, longjmp on error.
9871 *
9872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9873 * @param iSegReg The index of the segment register to use for
9874 * this access. The base and limits are checked.
9875 * @param GCPtrMem The address of the guest memory.
9876 * @param u128Value The value to store.
9877 */
9878IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9879{
9880 /* The lazy approach for now... */
9881 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9882 pu128Dst->au64[0] = u128Value.au64[0];
9883 pu128Dst->au64[1] = u128Value.au64[1];
9884 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9885}
9886#endif
9887
9888
9889/**
9890 * Stores a data dqword, SSE aligned.
9891 *
9892 * @returns Strict VBox status code.
9893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9894 * @param iSegReg The index of the segment register to use for
9895 * this access. The base and limits are checked.
9896 * @param GCPtrMem The address of the guest memory.
9897 * @param u128Value The value to store.
9898 */
9899IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9900{
9901 /* The lazy approach for now... */
9902 if ( (GCPtrMem & 15)
9903 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9904 return iemRaiseGeneralProtectionFault0(pVCpu);
9905
9906 PRTUINT128U pu128Dst;
9907 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9908 if (rc == VINF_SUCCESS)
9909 {
9910 pu128Dst->au64[0] = u128Value.au64[0];
9911 pu128Dst->au64[1] = u128Value.au64[1];
9912 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9913 }
9914 return rc;
9915}
9916
9917
9918#ifdef IEM_WITH_SETJMP
9919/**
9920 * Stores a data dqword, SSE aligned, longjmp on error.
9921 *
9922 * @returns Strict VBox status code.
9923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9924 * @param iSegReg The index of the segment register to use for
9925 * this access. The base and limits are checked.
9926 * @param GCPtrMem The address of the guest memory.
9927 * @param u128Value The value to store.
9928 */
9929DECL_NO_INLINE(IEM_STATIC, void)
9930iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9931{
9932 /* The lazy approach for now... */
9933 if ( (GCPtrMem & 15) == 0
9934 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9935 {
9936 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9937 pu128Dst->au64[0] = u128Value.au64[0];
9938 pu128Dst->au64[1] = u128Value.au64[1];
9939 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9940 return;
9941 }
9942
9943 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9944 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9945}
9946#endif
9947
9948
9949/**
9950 * Stores a data oword (octo word).
9951 *
9952 * @returns Strict VBox status code.
9953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9954 * @param iSegReg The index of the segment register to use for
9955 * this access. The base and limits are checked.
9956 * @param GCPtrMem The address of the guest memory.
9957 * @param pu256Value Pointer to the value to store.
9958 */
9959IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
9960{
9961 /* The lazy approach for now... */
9962 PRTUINT256U pu256Dst;
9963 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9964 if (rc == VINF_SUCCESS)
9965 {
9966 pu256Dst->au64[0] = pu256Value->au64[0];
9967 pu256Dst->au64[1] = pu256Value->au64[1];
9968 pu256Dst->au64[2] = pu256Value->au64[2];
9969 pu256Dst->au64[3] = pu256Value->au64[3];
9970 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
9971 }
9972 return rc;
9973}
9974
9975
9976#ifdef IEM_WITH_SETJMP
9977/**
9978 * Stores a data oword (octo word), longjmp on error.
9979 *
9980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9981 * @param iSegReg The index of the segment register to use for
9982 * this access. The base and limits are checked.
9983 * @param GCPtrMem The address of the guest memory.
9984 * @param pu256Value Pointer to the value to store.
9985 */
9986IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
9987{
9988 /* The lazy approach for now... */
9989 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9990 pu256Dst->au64[0] = pu256Value->au64[0];
9991 pu256Dst->au64[1] = pu256Value->au64[1];
9992 pu256Dst->au64[2] = pu256Value->au64[2];
9993 pu256Dst->au64[3] = pu256Value->au64[3];
9994 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
9995}
9996#endif
9997
9998
9999/**
10000 * Stores a data oword (octo word), AVX aligned.
10001 *
10002 * @returns Strict VBox status code.
10003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10004 * @param iSegReg The index of the segment register to use for
10005 * this access. The base and limits are checked.
10006 * @param GCPtrMem The address of the guest memory.
10007 * @param pu256Value Pointer to the value to store.
10008 */
10009IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10010{
10011 /* The lazy approach for now... */
10012 if (GCPtrMem & 31)
10013 return iemRaiseGeneralProtectionFault0(pVCpu);
10014
10015 PRTUINT256U pu256Dst;
10016 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10017 if (rc == VINF_SUCCESS)
10018 {
10019 pu256Dst->au64[0] = pu256Value->au64[0];
10020 pu256Dst->au64[1] = pu256Value->au64[1];
10021 pu256Dst->au64[2] = pu256Value->au64[2];
10022 pu256Dst->au64[3] = pu256Value->au64[3];
10023 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10024 }
10025 return rc;
10026}
10027
10028
10029#ifdef IEM_WITH_SETJMP
10030/**
10031 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10032 *
10033 * @returns Strict VBox status code.
10034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10035 * @param iSegReg The index of the segment register to use for
10036 * this access. The base and limits are checked.
10037 * @param GCPtrMem The address of the guest memory.
10038 * @param pu256Value Pointer to the value to store.
10039 */
10040DECL_NO_INLINE(IEM_STATIC, void)
10041iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10042{
10043 /* The lazy approach for now... */
10044 if ((GCPtrMem & 31) == 0)
10045 {
10046 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10047 pu256Dst->au64[0] = pu256Value->au64[0];
10048 pu256Dst->au64[1] = pu256Value->au64[1];
10049 pu256Dst->au64[2] = pu256Value->au64[2];
10050 pu256Dst->au64[3] = pu256Value->au64[3];
10051 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10052 return;
10053 }
10054
10055 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10056 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10057}
10058#endif
10059
10060
10061/**
10062 * Stores a descriptor register (sgdt, sidt).
10063 *
10064 * @returns Strict VBox status code.
10065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10066 * @param cbLimit The limit.
10067 * @param GCPtrBase The base address.
10068 * @param iSegReg The index of the segment register to use for
10069 * this access. The base and limits are checked.
10070 * @param GCPtrMem The address of the guest memory.
10071 */
10072IEM_STATIC VBOXSTRICTRC
10073iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10074{
10075 /*
10076 * The SIDT and SGDT instructions actually store the data using two
10077 * independent writes. The instructions do not respond to opsize prefixes.
10078 */
10079 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10080 if (rcStrict == VINF_SUCCESS)
10081 {
10082 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10083 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10084 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10085 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10086 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10087 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10088 else
10089 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10090 }
10091 return rcStrict;
10092}
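
/*
 * Worked example for the 286 quirk above (illustrative, using a made up base):
 * in 16-bit mode with a target CPU of 286 or earlier, storing a base of
 * 0x00012345 writes 0xff012345 at offset +2, i.e. the undefined top byte is
 * forced to 0xff; later CPUs store the 32-bit base unmodified.
 */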
10093
10094
10095/**
10096 * Pushes a word onto the stack.
10097 *
10098 * @returns Strict VBox status code.
10099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10100 * @param u16Value The value to push.
10101 */
10102IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10103{
10104 /* Decrement the stack pointer. */
10105 uint64_t uNewRsp;
10106 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10107
10108 /* Write the word the lazy way. */
10109 uint16_t *pu16Dst;
10110 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10111 if (rc == VINF_SUCCESS)
10112 {
10113 *pu16Dst = u16Value;
10114 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10115 }
10116
10117 /* Commit the new RSP value unless an access handler made trouble. */
10118 if (rc == VINF_SUCCESS)
10119 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10120
10121 return rc;
10122}
10123
10124
10125/**
10126 * Pushes a dword onto the stack.
10127 *
10128 * @returns Strict VBox status code.
10129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10130 * @param u32Value The value to push.
10131 */
10132IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10133{
10134 /* Decrement the stack pointer. */
10135 uint64_t uNewRsp;
10136 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10137
10138 /* Write the dword the lazy way. */
10139 uint32_t *pu32Dst;
10140 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10141 if (rc == VINF_SUCCESS)
10142 {
10143 *pu32Dst = u32Value;
10144 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10145 }
10146
10147 /* Commit the new RSP value unless an access handler made trouble. */
10148 if (rc == VINF_SUCCESS)
10149 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10150
10151 return rc;
10152}
10153
10154
10155/**
10156 * Pushes a dword segment register value onto the stack.
10157 *
10158 * @returns Strict VBox status code.
10159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10160 * @param u32Value The value to push.
10161 */
10162IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10163{
10164 /* Decrement the stack pointer. */
10165 uint64_t uNewRsp;
10166 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10167
10168 /* The Intel docs talk about zero extending the selector register
10169 value. My actual Intel CPU here might be zero extending the value,
10170 but it still only writes the lower word... */
10171 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10172 * happens when crossing an electric page boundary, is the high word checked
10173 * for write accessibility or not? Probably it is. What about segment limits?
10174 * It appears this behavior is also shared with trap error codes.
10175 *
10176 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
10177 * on ancient hardware to see when it actually changed. */
10178 uint16_t *pu16Dst;
10179 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10180 if (rc == VINF_SUCCESS)
10181 {
10182 *pu16Dst = (uint16_t)u32Value;
10183 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10184 }
10185
10186 /* Commit the new RSP value unless an access handler made trouble. */
10187 if (rc == VINF_SUCCESS)
10188 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10189
10190 return rc;
10191}
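
/*
 * Worked example for the partial write above (illustrative, ES value made up):
 * pushing ES=0x0023 with a 32-bit operand size moves RSP down by 4, but only
 * the low word 0x0023 is written at the new stack top; the upper two bytes are
 * merely mapped for read/write and left as they were.
 */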
10192
10193
10194/**
10195 * Pushes a qword onto the stack.
10196 *
10197 * @returns Strict VBox status code.
10198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10199 * @param u64Value The value to push.
10200 */
10201IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10202{
10203 /* Decrement the stack pointer. */
10204 uint64_t uNewRsp;
10205 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10206
10207 /* Write the qword the lazy way. */
10208 uint64_t *pu64Dst;
10209 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10210 if (rc == VINF_SUCCESS)
10211 {
10212 *pu64Dst = u64Value;
10213 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10214 }
10215
10216 /* Commit the new RSP value unless an access handler made trouble. */
10217 if (rc == VINF_SUCCESS)
10218 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10219
10220 return rc;
10221}
10222
10223
10224/**
10225 * Pops a word from the stack.
10226 *
10227 * @returns Strict VBox status code.
10228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10229 * @param pu16Value Where to store the popped value.
10230 */
10231IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10232{
10233 /* Increment the stack pointer. */
10234 uint64_t uNewRsp;
10235 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10236
10237 /* Read the word the lazy way. */
10238 uint16_t const *pu16Src;
10239 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10240 if (rc == VINF_SUCCESS)
10241 {
10242 *pu16Value = *pu16Src;
10243 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10244
10245 /* Commit the new RSP value. */
10246 if (rc == VINF_SUCCESS)
10247 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10248 }
10249
10250 return rc;
10251}
10252
10253
10254/**
10255 * Pops a dword from the stack.
10256 *
10257 * @returns Strict VBox status code.
10258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10259 * @param pu32Value Where to store the popped value.
10260 */
10261IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10262{
10263 /* Increment the stack pointer. */
10264 uint64_t uNewRsp;
10265 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10266
10267 /* Read the dword the lazy way. */
10268 uint32_t const *pu32Src;
10269 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10270 if (rc == VINF_SUCCESS)
10271 {
10272 *pu32Value = *pu32Src;
10273 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10274
10275 /* Commit the new RSP value. */
10276 if (rc == VINF_SUCCESS)
10277 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10278 }
10279
10280 return rc;
10281}
10282
10283
10284/**
10285 * Pops a qword from the stack.
10286 *
10287 * @returns Strict VBox status code.
10288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10289 * @param pu64Value Where to store the popped value.
10290 */
10291IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10292{
10293 /* Increment the stack pointer. */
10294 uint64_t uNewRsp;
10295 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10296
10297 /* Read the qword the lazy way. */
10298 uint64_t const *pu64Src;
10299 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10300 if (rc == VINF_SUCCESS)
10301 {
10302 *pu64Value = *pu64Src;
10303 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10304
10305 /* Commit the new RSP value. */
10306 if (rc == VINF_SUCCESS)
10307 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10308 }
10309
10310 return rc;
10311}
10312
10313
10314/**
10315 * Pushes a word onto the stack, using a temporary stack pointer.
10316 *
10317 * @returns Strict VBox status code.
10318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10319 * @param u16Value The value to push.
10320 * @param pTmpRsp Pointer to the temporary stack pointer.
10321 */
10322IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10323{
10324 /* Decrement the stack pointer. */
10325 RTUINT64U NewRsp = *pTmpRsp;
10326 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10327
10328 /* Write the word the lazy way. */
10329 uint16_t *pu16Dst;
10330 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10331 if (rc == VINF_SUCCESS)
10332 {
10333 *pu16Dst = u16Value;
10334 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10335 }
10336
10337 /* Commit the new RSP value unless an access handler made trouble. */
10338 if (rc == VINF_SUCCESS)
10339 *pTmpRsp = NewRsp;
10340
10341 return rc;
10342}
10343
10344
10345/**
10346 * Pushes a dword onto the stack, using a temporary stack pointer.
10347 *
10348 * @returns Strict VBox status code.
10349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10350 * @param u32Value The value to push.
10351 * @param pTmpRsp Pointer to the temporary stack pointer.
10352 */
10353IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10354{
10355 /* Decrement the stack pointer. */
10356 RTUINT64U NewRsp = *pTmpRsp;
10357 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10358
10359 /* Write the dword the lazy way. */
10360 uint32_t *pu32Dst;
10361 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10362 if (rc == VINF_SUCCESS)
10363 {
10364 *pu32Dst = u32Value;
10365 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10366 }
10367
10368 /* Commit the new RSP value unless an access handler made trouble. */
10369 if (rc == VINF_SUCCESS)
10370 *pTmpRsp = NewRsp;
10371
10372 return rc;
10373}
10374
10375
10376/**
10377 * Pushes a qword onto the stack, using a temporary stack pointer.
10378 *
10379 * @returns Strict VBox status code.
10380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10381 * @param u64Value The value to push.
10382 * @param pTmpRsp Pointer to the temporary stack pointer.
10383 */
10384IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10385{
10386 /* Decrement the stack pointer. */
10387 RTUINT64U NewRsp = *pTmpRsp;
10388 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10389
10390 /* Write the qword the lazy way. */
10391 uint64_t *pu64Dst;
10392 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10393 if (rc == VINF_SUCCESS)
10394 {
10395 *pu64Dst = u64Value;
10396 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10397 }
10398
10399 /* Commit the new RSP value unless an access handler made trouble. */
10400 if (rc == VINF_SUCCESS)
10401 *pTmpRsp = NewRsp;
10402
10403 return rc;
10404}
10405
10406
10407/**
10408 * Pops a word from the stack, using a temporary stack pointer.
10409 *
10410 * @returns Strict VBox status code.
10411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10412 * @param pu16Value Where to store the popped value.
10413 * @param pTmpRsp Pointer to the temporary stack pointer.
10414 */
10415IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10416{
10417 /* Increment the stack pointer. */
10418 RTUINT64U NewRsp = *pTmpRsp;
10419 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10420
10421 /* Read the word the lazy way. */
10422 uint16_t const *pu16Src;
10423 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10424 if (rc == VINF_SUCCESS)
10425 {
10426 *pu16Value = *pu16Src;
10427 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10428
10429 /* Commit the new RSP value. */
10430 if (rc == VINF_SUCCESS)
10431 *pTmpRsp = NewRsp;
10432 }
10433
10434 return rc;
10435}
10436
10437
10438/**
10439 * Pops a dword from the stack, using a temporary stack pointer.
10440 *
10441 * @returns Strict VBox status code.
10442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10443 * @param pu32Value Where to store the popped value.
10444 * @param pTmpRsp Pointer to the temporary stack pointer.
10445 */
10446IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10447{
10448 /* Increment the stack pointer. */
10449 RTUINT64U NewRsp = *pTmpRsp;
10450 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10451
10452 /* Read the dword the lazy way. */
10453 uint32_t const *pu32Src;
10454 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10455 if (rc == VINF_SUCCESS)
10456 {
10457 *pu32Value = *pu32Src;
10458 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10459
10460 /* Commit the new RSP value. */
10461 if (rc == VINF_SUCCESS)
10462 *pTmpRsp = NewRsp;
10463 }
10464
10465 return rc;
10466}
10467
10468
10469/**
10470 * Pops a qword from the stack, using a temporary stack pointer.
10471 *
10472 * @returns Strict VBox status code.
10473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10474 * @param pu64Value Where to store the popped value.
10475 * @param pTmpRsp Pointer to the temporary stack pointer.
10476 */
10477IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10478{
10479 /* Increment the stack pointer. */
10480 RTUINT64U NewRsp = *pTmpRsp;
10481 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10482
10483 /* Read the qword the lazy way. */
10484 uint64_t const *pu64Src;
10485 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10486 if (rcStrict == VINF_SUCCESS)
10487 {
10488 *pu64Value = *pu64Src;
10489 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10490
10491 /* Commit the new RSP value. */
10492 if (rcStrict == VINF_SUCCESS)
10493 *pTmpRsp = NewRsp;
10494 }
10495
10496 return rcStrict;
10497}
10498
10499
10500/**
10501 * Begin a special stack push (used by interrupt, exceptions and such).
10502 *
10503 * This will raise \#SS or \#PF if appropriate.
10504 *
10505 * @returns Strict VBox status code.
10506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10507 * @param cbMem The number of bytes to push onto the stack.
10508 * @param ppvMem Where to return the pointer to the stack memory.
10509 * As with the other memory functions, this could be
10510 * direct access or bounce buffered access, so
10511 * don't commit the register until the commit call
10512 * succeeds.
10513 * @param puNewRsp Where to return the new RSP value. This must be
10514 * passed unchanged to
10515 * iemMemStackPushCommitSpecial().
10516 */
10517IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10518{
10519 Assert(cbMem < UINT8_MAX);
10520 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10521 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10522}
10523
10524
10525/**
10526 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10527 *
10528 * This will update the rSP.
10529 *
10530 * @returns Strict VBox status code.
10531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10532 * @param pvMem The pointer returned by
10533 * iemMemStackPushBeginSpecial().
10534 * @param uNewRsp The new RSP value returned by
10535 * iemMemStackPushBeginSpecial().
10536 */
10537IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10538{
10539 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10540 if (rcStrict == VINF_SUCCESS)
10541 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10542 return rcStrict;
10543}
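
/*
 * Usage sketch for the special push pair (illustrative only; cbFrame and the
 * frame contents are hypothetical):
 *
 *      void        *pvFrame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbFrame, &pvFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      ... fill in the frame at pvFrame ...
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp);
 */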
10544
10545
10546/**
10547 * Begin a special stack pop (used by iret, retf and such).
10548 *
10549 * This will raise \#SS or \#PF if appropriate.
10550 *
10551 * @returns Strict VBox status code.
10552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10553 * @param cbMem The number of bytes to pop from the stack.
10554 * @param ppvMem Where to return the pointer to the stack memory.
10555 * @param puNewRsp Where to return the new RSP value. This must be
10556 * assigned to CPUMCTX::rsp manually some time
10557 * after iemMemStackPopDoneSpecial() has been
10558 * called.
10559 */
10560IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10561{
10562 Assert(cbMem < UINT8_MAX);
10563 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10564 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10565}
10566
10567
10568/**
10569 * Continue a special stack pop (used by iret and retf).
10570 *
10571 * This will raise \#SS or \#PF if appropriate.
10572 *
10573 * @returns Strict VBox status code.
10574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10575 * @param cbMem The number of bytes to pop from the stack.
10576 * @param ppvMem Where to return the pointer to the stack memory.
10577 * @param puNewRsp Where to return the new RSP value. This must be
10578 * assigned to CPUMCTX::rsp manually some time
10579 * after iemMemStackPopDoneSpecial() has been
10580 * called.
10581 */
10582IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10583{
10584 Assert(cbMem < UINT8_MAX);
10585 RTUINT64U NewRsp;
10586 NewRsp.u = *puNewRsp;
10587 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10588 *puNewRsp = NewRsp.u;
10589 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10590}
10591
10592
10593/**
10594 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10595 * iemMemStackPopContinueSpecial).
10596 *
10597 * The caller will manually commit the rSP.
10598 *
10599 * @returns Strict VBox status code.
10600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10601 * @param pvMem The pointer returned by
10602 * iemMemStackPopBeginSpecial() or
10603 * iemMemStackPopContinueSpecial().
10604 */
10605IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10606{
10607 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10608}
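
/*
 * Usage sketch for the special pop functions (illustrative only; cbFrame is
 * hypothetical).  Unlike the regular pops, the caller commits RSP manually once
 * it knows the instruction will complete:
 *
 *      void const  *pvFrame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbFrame, &pvFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      ... read the frame contents from pvFrame ...
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;
 */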
10609
10610
10611/**
10612 * Fetches a system table byte.
10613 *
10614 * @returns Strict VBox status code.
10615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10616 * @param pbDst Where to return the byte.
10617 * @param iSegReg The index of the segment register to use for
10618 * this access. The base and limits are checked.
10619 * @param GCPtrMem The address of the guest memory.
10620 */
10621IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10622{
10623 /* The lazy approach for now... */
10624 uint8_t const *pbSrc;
10625 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10626 if (rc == VINF_SUCCESS)
10627 {
10628 *pbDst = *pbSrc;
10629 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10630 }
10631 return rc;
10632}
10633
10634
10635/**
10636 * Fetches a system table word.
10637 *
10638 * @returns Strict VBox status code.
10639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10640 * @param pu16Dst Where to return the word.
10641 * @param iSegReg The index of the segment register to use for
10642 * this access. The base and limits are checked.
10643 * @param GCPtrMem The address of the guest memory.
10644 */
10645IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10646{
10647 /* The lazy approach for now... */
10648 uint16_t const *pu16Src;
10649 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10650 if (rc == VINF_SUCCESS)
10651 {
10652 *pu16Dst = *pu16Src;
10653 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10654 }
10655 return rc;
10656}
10657
10658
10659/**
10660 * Fetches a system table dword.
10661 *
10662 * @returns Strict VBox status code.
10663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10664 * @param pu32Dst Where to return the dword.
10665 * @param iSegReg The index of the segment register to use for
10666 * this access. The base and limits are checked.
10667 * @param GCPtrMem The address of the guest memory.
10668 */
10669IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10670{
10671 /* The lazy approach for now... */
10672 uint32_t const *pu32Src;
10673 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10674 if (rc == VINF_SUCCESS)
10675 {
10676 *pu32Dst = *pu32Src;
10677 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10678 }
10679 return rc;
10680}
10681
10682
10683/**
10684 * Fetches a system table qword.
10685 *
10686 * @returns Strict VBox status code.
10687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10688 * @param pu64Dst Where to return the qword.
10689 * @param iSegReg The index of the segment register to use for
10690 * this access. The base and limits are checked.
10691 * @param GCPtrMem The address of the guest memory.
10692 */
10693IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10694{
10695 /* The lazy approach for now... */
10696 uint64_t const *pu64Src;
10697 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10698 if (rc == VINF_SUCCESS)
10699 {
10700 *pu64Dst = *pu64Src;
10701 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10702 }
10703 return rc;
10704}
10705
10706
10707/**
10708 * Fetches a descriptor table entry with caller specified error code.
10709 *
10710 * @returns Strict VBox status code.
10711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10712 * @param pDesc Where to return the descriptor table entry.
10713 * @param uSel The selector which table entry to fetch.
10714 * @param uXcpt The exception to raise on table lookup error.
10715 * @param uErrorCode The error code associated with the exception.
10716 */
10717IEM_STATIC VBOXSTRICTRC
10718iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10719{
10720 AssertPtr(pDesc);
10721 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10722
10723 /** @todo did the 286 require all 8 bytes to be accessible? */
10724 /*
10725 * Get the selector table base and check bounds.
10726 */
10727 RTGCPTR GCPtrBase;
10728 if (uSel & X86_SEL_LDT)
10729 {
10730 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10731 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10732 {
10733 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10734 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10735 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10736 uErrorCode, 0);
10737 }
10738
10739 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10740 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10741 }
10742 else
10743 {
10744 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10745 {
10746 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10747 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10748 uErrorCode, 0);
10749 }
10750 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10751 }
10752
10753 /*
10754 * Read the legacy descriptor and maybe the long mode extensions if
10755 * required.
10756 */
10757 VBOXSTRICTRC rcStrict;
10758 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10759 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10760 else
10761 {
10762 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10763 if (rcStrict == VINF_SUCCESS)
10764 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10765 if (rcStrict == VINF_SUCCESS)
10766 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10767 if (rcStrict == VINF_SUCCESS)
10768 pDesc->Legacy.au16[3] = 0;
10769 else
10770 return rcStrict;
10771 }
10772
10773 if (rcStrict == VINF_SUCCESS)
10774 {
10775 if ( !IEM_IS_LONG_MODE(pVCpu)
10776 || pDesc->Legacy.Gen.u1DescType)
10777 pDesc->Long.au64[1] = 0;
10778 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10779 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10780 else
10781 {
10782 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10783 /** @todo is this the right exception? */
10784 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10785 }
10786 }
10787 return rcStrict;
10788}
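/*
 * Worked example (added for illustration): a selector value of 0x002f has
 * RPL=3 (bits 0-1), TI=1 (bit 2, so the LDT is used) and index=5 (bits 3-15).
 * The entry is therefore read from ldtr.u64Base + 5*8 = ldtr.u64Base + 0x28,
 * which is exactly the GCPtrBase + (uSel & X86_SEL_MASK) computation above,
 * and the limit check compares uSel | X86_SEL_RPL_LDT = 0x2f, the offset of
 * the entry's last byte, against ldtr.u32Limit.
 */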
10789
10790
10791/**
10792 * Fetches a descriptor table entry.
10793 *
10794 * @returns Strict VBox status code.
10795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10796 * @param pDesc Where to return the descriptor table entry.
10797 * @param uSel The selector which table entry to fetch.
10798 * @param uXcpt The exception to raise on table lookup error.
10799 */
10800IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10801{
10802 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10803}
10804
10805
10806/**
10807 * Fakes a long mode stack selector for SS = 0.
10808 *
10809 * @param pDescSs Where to return the fake stack descriptor.
10810 * @param uDpl The DPL we want.
10811 */
10812IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10813{
10814 pDescSs->Long.au64[0] = 0;
10815 pDescSs->Long.au64[1] = 0;
10816 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10817 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10818 pDescSs->Long.Gen.u2Dpl = uDpl;
10819 pDescSs->Long.Gen.u1Present = 1;
10820 pDescSs->Long.Gen.u1Long = 1;
10821}
10822
10823
10824/**
10825 * Marks the selector descriptor as accessed (only non-system descriptors).
10826 *
10827 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10828 * will therefore skip the limit checks.
10829 *
10830 * @returns Strict VBox status code.
10831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10832 * @param uSel The selector.
10833 */
10834IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10835{
10836 /*
10837 * Get the selector table base and calculate the entry address.
10838 */
10839 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10840 ? pVCpu->cpum.GstCtx.ldtr.u64Base
10841 : pVCpu->cpum.GstCtx.gdtr.pGdt;
10842 GCPtr += uSel & X86_SEL_MASK;
10843
10844 /*
10845 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10846 * ugly stuff to avoid this. This will make sure the access is atomic and
10847 * more or less removes any question about 8-bit vs 32-bit accesses.
10848 */
10849 VBOXSTRICTRC rcStrict;
10850 uint32_t volatile *pu32;
10851 if ((GCPtr & 3) == 0)
10852 {
10853 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
10854 GCPtr += 2 + 2;
10855 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10856 if (rcStrict != VINF_SUCCESS)
10857 return rcStrict;
10858 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10859 }
10860 else
10861 {
10862 /* The misaligned GDT/LDT case, map the whole thing. */
10863 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10864 if (rcStrict != VINF_SUCCESS)
10865 return rcStrict;
10866 switch ((uintptr_t)pu32 & 3)
10867 {
10868 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10869 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10870 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10871 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10872 }
10873 }
10874
10875 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10876}
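/*
 * Worked example (added for illustration): the accessed bit is bit 40 of the
 * 8-byte descriptor, i.e. bit 0 of byte 5.  In the aligned case above the
 * mapping starts at byte 4 (GCPtr += 2 + 2), so bit 40 of the descriptor
 * becomes bit 8 of the mapped dword, hence ASMAtomicBitSet(pu32, 8).  In the
 * misaligned case with ((uintptr_t)pu32 & 3) == 1, the base is moved up 3
 * bytes to regain dword alignment and the bit index drops by 3*8 = 24 to
 * bit 16; all four switch cases end up setting the very same byte 5, bit 0.
 */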
10877
10878/** @} */
10879
10880
10881/*
10882 * Include the C/C++ implementation of the instructions.
10883 */
10884#include "IEMAllCImpl.cpp.h"
10885
10886
10887
10888/** @name "Microcode" macros.
10889 *
10890 * The idea is that we should be able to use the same code to interpret
10891 * instructions as well as to recompile them. Thus this obfuscation.
10892 *
10893 * @{
10894 */
10895#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10896#define IEM_MC_END() }
10897#define IEM_MC_PAUSE() do {} while (0)
10898#define IEM_MC_CONTINUE() do {} while (0)
10899
10900/** Internal macro. */
10901#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10902 do \
10903 { \
10904 VBOXSTRICTRC rcStrict2 = a_Expr; \
10905 if (rcStrict2 != VINF_SUCCESS) \
10906 return rcStrict2; \
10907 } while (0)
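/*
 * Hypothetical sketch (added for illustration, not part of the original
 * sources): a rough outline of how the IEM_MC_* macros defined throughout
 * this section are strung together inside an opcode decoder body, here for a
 * 16-bit register-to-register ALU form.  The worker name iemAImpl_add_u16 and
 * the register indexes iRegDst/iRegSrc are assumptions made for the example.
 */
#if 0
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint16_t *, pu16Dst, 0);
    IEM_MC_ARG(uint16_t,   u16Src,  1);
    IEM_MC_ARG(uint32_t *, pEFlags, 2);
    IEM_MC_FETCH_GREG_U16(u16Src, iRegSrc);    /* read the source GPR by value */
    IEM_MC_REF_GREG_U16(pu16Dst, iRegDst);     /* take a pointer to the destination GPR */
    IEM_MC_REF_EFLAGS(pEFlags);                /* and to the guest EFLAGS */
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
    IEM_MC_ADVANCE_RIP();                      /* commit the instruction length */
    IEM_MC_END();
#endif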
10908
10909
10910#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10911#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10912#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10913#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10914#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10915#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10916#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10917#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10918#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10919 do { \
10920 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10921 return iemRaiseDeviceNotAvailable(pVCpu); \
10922 } while (0)
10923#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
10924 do { \
10925 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
10926 return iemRaiseDeviceNotAvailable(pVCpu); \
10927 } while (0)
10928#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10929 do { \
10930 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10931 return iemRaiseMathFault(pVCpu); \
10932 } while (0)
10933#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
10934 do { \
10935 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
10936 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
10937 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
10938 return iemRaiseUndefinedOpcode(pVCpu); \
10939 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10940 return iemRaiseDeviceNotAvailable(pVCpu); \
10941 } while (0)
10942#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
10943 do { \
10944 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
10945 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
10946 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
10947 return iemRaiseUndefinedOpcode(pVCpu); \
10948 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10949 return iemRaiseDeviceNotAvailable(pVCpu); \
10950 } while (0)
10951#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
10952 do { \
10953 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
10954 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
10955 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
10956 return iemRaiseUndefinedOpcode(pVCpu); \
10957 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10958 return iemRaiseDeviceNotAvailable(pVCpu); \
10959 } while (0)
10960#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
10961 do { \
10962 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
10963 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
10964 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
10965 return iemRaiseUndefinedOpcode(pVCpu); \
10966 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10967 return iemRaiseDeviceNotAvailable(pVCpu); \
10968 } while (0)
10969#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10970 do { \
10971 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
10972 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
10973 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10974 return iemRaiseUndefinedOpcode(pVCpu); \
10975 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10976 return iemRaiseDeviceNotAvailable(pVCpu); \
10977 } while (0)
10978#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10979 do { \
10980 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
10981 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
10982 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10983 return iemRaiseUndefinedOpcode(pVCpu); \
10984 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10985 return iemRaiseDeviceNotAvailable(pVCpu); \
10986 } while (0)
10987#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10988 do { \
10989 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
10990 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10991 return iemRaiseUndefinedOpcode(pVCpu); \
10992 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10993 return iemRaiseDeviceNotAvailable(pVCpu); \
10994 } while (0)
10995#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10996 do { \
10997 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
10998 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10999 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11000 return iemRaiseUndefinedOpcode(pVCpu); \
11001 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11002 return iemRaiseDeviceNotAvailable(pVCpu); \
11003 } while (0)
11004#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11005 do { \
11006 if (pVCpu->iem.s.uCpl != 0) \
11007 return iemRaiseGeneralProtectionFault0(pVCpu); \
11008 } while (0)
11009#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11010 do { \
11011 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11012 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11013 } while (0)
11014#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11015 do { \
11016 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11017 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11018 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11019 return iemRaiseUndefinedOpcode(pVCpu); \
11020 } while (0)
11021#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11022 do { \
11023 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11024 return iemRaiseGeneralProtectionFault0(pVCpu); \
11025 } while (0)
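/*
 * Worked example (added for illustration): taking IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT
 * above, CR0.EM=1, CR4.OSFXSR=0 or a clear fSse2 feature bit each yield #UD
 * (undefined opcode), while a configuration that passes those checks but has
 * CR0.TS=1 yields #NM (device not available) instead, which keeps the guest's
 * lazy FPU/SSE state switching working.
 */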
11026
11027
11028#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11029#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11030#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11031#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11032#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11033#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11034#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11035 uint32_t a_Name; \
11036 uint32_t *a_pName = &a_Name
11037#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11038 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11039
11040#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11041#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11042
11043#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11044#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11045#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11046#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11047#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11048#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11049#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11050#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11051#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11052#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11053#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11054#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11055#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11056#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11057#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11058#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11059#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
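/*
 * Illustration (added, not part of the original sources): the _SX_ fetchers
 * above rely on the cast to the signed type plus the implicit widening to do
 * the sign extension.  A standalone sketch of the same idiom:
 */
#if 0
#include <assert.h>
#include <stdint.h>
int main(void)
{
    uint8_t  const u8  = 0x80;            /* most significant bit set */
    uint32_t const u32 = (int8_t)u8;      /* the cast used by IEM_MC_FETCH_GREG_U8_SX_U32 */
    assert(u32 == UINT32_C(0xffffff80));  /* sign-extended, not zero-extended */
    return 0;
}
#endif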
11060#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11061 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11062 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11063 } while (0)
11064#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11065 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11066 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11067 } while (0)
11068#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11069 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11070 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11071 } while (0)
11072/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11073#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11074 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11075 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11076 } while (0)
11077#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11078 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11079 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11080 } while (0)
11081/** @note Not for IOPL or IF testing or modification. */
11082#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11083#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11084#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11085#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11086
11087#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11088#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11089#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11090#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11091#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11092#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11093#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11094#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11095#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11096#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
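/*
 * Worked example (added for illustration): because IEM_MC_STORE_GREG_U32
 * writes through iemGRegRefU64 with a uint32_t cast, storing 0x12345678 into
 * a register that previously held 0xdeadbeef00000000 leaves
 * 0x0000000012345678 behind, matching the AMD64 rule that 32-bit writes
 * zero-extend into the upper half of the destination GPR.
 */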
11097/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11098#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11099 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11100 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11101 } while (0)
11102#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11103 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11104 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11105 } while (0)
11106#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11107 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11108
11109
11110#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11111#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11112/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11113 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11114#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11115#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11116/** @note Not for IOPL or IF testing or modification. */
11117#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11118
11119#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11120#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11121#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11122 do { \
11123 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11124 *pu32Reg += (a_u32Value); \
11125 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11126 } while (0)
11127#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11128
11129#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11130#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11131#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11132 do { \
11133 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11134 *pu32Reg -= (a_u32Value); \
11135 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11136 } while (0)
11137#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11138#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11139
11140#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11141#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11142#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11143#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11144#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11145#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11146#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11147
11148#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11149#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11150#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11151#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11152
11153#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11154#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11155#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11156
11157#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11158#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11159#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11160
11161#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11162#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11163#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11164
11165#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11166#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11167#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11168
11169#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11170
11171#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11172
11173#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11174#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11175#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11176 do { \
11177 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11178 *pu32Reg &= (a_u32Value); \
11179 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11180 } while (0)
11181#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11182
11183#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11184#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11185#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11186 do { \
11187 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11188 *pu32Reg |= (a_u32Value); \
11189 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11190 } while (0)
11191#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11192
11193
11194/** @note Not for IOPL or IF modification. */
11195#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11196/** @note Not for IOPL or IF modification. */
11197#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11198/** @note Not for IOPL or IF modification. */
11199#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11200
11201#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11202
11203/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11204#define IEM_MC_FPU_TO_MMX_MODE() do { \
11205 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11206 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11207 } while (0)
11208
11209/** Switches the FPU state from MMX mode (FTW=0xffff). */
11210#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11211 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11212 } while (0)
11213
11214#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11215 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11216#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11217 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11218#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11219 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11220 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11221 } while (0)
11222#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11223 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11224 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11225 } while (0)
11226#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11227 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11228#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11229 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11230#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11231 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
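/*
 * Note added for illustration: the MMX registers alias the 64-bit mantissas
 * of the x87 data registers, so the stores above also write 0xffff into
 * au32[2], the exponent/sign word of the 80-bit register.  That mirrors what
 * real hardware does on an MMX write and is why such a value reads back as a
 * NaN when viewed through the FPU register stack.
 */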
11232
11233#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11234 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11235 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11236 } while (0)
11237#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11238 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11239#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11240 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11241#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11242 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11243#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11244 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11245 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11246 } while (0)
11247#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11248 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11249#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11250 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11251 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11252 } while (0)
11253#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11254 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11255#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11256 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11257 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11258 } while (0)
11259#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11260 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11261#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11262 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11263#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11264 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11265#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11266 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11267#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11268 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11269 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11270 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11271 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11272 } while (0)
11273
11274#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11275 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11276 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11277 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11278 } while (0)
11279#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11280 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11281 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11282 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11283 } while (0)
11284#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11285 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11286 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11287 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11288 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11289 } while (0)
11290#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11291 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11292 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11293 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11294 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11295 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11296 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11297 } while (0)
11298
11299#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11300#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11301 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11302 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11303 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11304 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11305 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11306 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11307 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11308 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11309 } while (0)
11310#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11311 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11312 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11313 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11314 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11315 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11316 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11317 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11318 } while (0)
11319#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11320 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11321 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11322 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11323 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11324 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11325 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11326 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11327 } while (0)
11328#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11329 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11330 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11331 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11332 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11333 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11334 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11335 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11336 } while (0)
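/*
 * Note added for illustration: the _ZX_VLMAX stores above implement the VEX
 * rule that a 128-bit (or narrower) write to an XMM register zeroes bits
 * 255:128 of the containing YMM register, which is why both YmmHi qwords are
 * cleared; IEM_MC_INT_CLEAR_ZMM_256_UP is a placeholder for doing the same to
 * the bits above 255 once wider registers are supported.
 */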
11337
11338#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11339 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11340#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11341 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11342#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11343 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11344#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11345 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11346 uintptr_t const iYRegTmp = (a_iYReg); \
11347 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11348 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11349 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11350 } while (0)
11351
11352#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11353 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11354 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11355 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11356 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11357 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11358 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11359 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11360 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11361 } while (0)
11362#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11363 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11364 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11365 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11366 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11367 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11368 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11369 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11370 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11371 } while (0)
11372#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11373 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11374 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11375 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11376 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11377 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11378 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11379 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11380 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11381 } while (0)
11382
11383#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11384 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11385 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11386 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11387 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11388 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11389 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11390 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11391 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11392 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11393 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11394 } while (0)
11395#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11396 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11397 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11398 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11399 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11400 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11401 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11402 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11403 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11404 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11405 } while (0)
11406#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11407 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11408 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11409 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11410 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11411 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11412 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11413 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11414 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11415 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11416 } while (0)
11417#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11418 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11419 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11420 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11421 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11422 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11423 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11424 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11425 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11426 } while (0)
11427
11428#ifndef IEM_WITH_SETJMP
11429# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11430 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11431# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11432 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11433# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11434 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11435#else
11436# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11437 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11438# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11439 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11440# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11441 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11442#endif
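/*
 * Note added for illustration: without IEM_WITH_SETJMP the fetch macros above
 * expand to calls that return a VBOXSTRICTRC and bail out of the decoder via
 * IEM_MC_RETURN_ON_FAILURE on any non-VINF_SUCCESS status, while with
 * IEM_WITH_SETJMP they expand to plain assignments from the *Jmp fetchers,
 * which report failures by longjmp'ing out instead, so the decoder body needs
 * no per-access status checks.
 */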
11443
11444#ifndef IEM_WITH_SETJMP
11445# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11446 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11447# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11448 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11449# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11450 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11451#else
11452# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11453 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11454# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11455 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11456# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11457 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11458#endif
11459
11460#ifndef IEM_WITH_SETJMP
11461# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11462 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11463# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11464 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11465# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11466 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11467#else
11468# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11469 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11470# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11471 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11472# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11473 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11474#endif
11475
11476#ifdef SOME_UNUSED_FUNCTION
11477# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11478 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11479#endif
11480
11481#ifndef IEM_WITH_SETJMP
11482# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11483 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11484# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11485 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11486# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11487 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11488# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11489 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11490#else
11491# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11492 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11493# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11494 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11495# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11496 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11497# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11498 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11499#endif
11500
11501#ifndef IEM_WITH_SETJMP
11502# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11503 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11504# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11505 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11506# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11507 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11508#else
11509# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11510 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11511# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11512 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11513# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11514 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11515#endif
11516
11517#ifndef IEM_WITH_SETJMP
11518# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11519 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11520# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11521 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11522#else
11523# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11524 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11525# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11526 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11527#endif
11528
11529#ifndef IEM_WITH_SETJMP
11530# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11531 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11532# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11533 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11534#else
11535# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11536 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11537# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11538 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11539#endif
11540
11541
11542
11543#ifndef IEM_WITH_SETJMP
11544# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11545 do { \
11546 uint8_t u8Tmp; \
11547 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11548 (a_u16Dst) = u8Tmp; \
11549 } while (0)
11550# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11551 do { \
11552 uint8_t u8Tmp; \
11553 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11554 (a_u32Dst) = u8Tmp; \
11555 } while (0)
11556# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11557 do { \
11558 uint8_t u8Tmp; \
11559 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11560 (a_u64Dst) = u8Tmp; \
11561 } while (0)
11562# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11563 do { \
11564 uint16_t u16Tmp; \
11565 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11566 (a_u32Dst) = u16Tmp; \
11567 } while (0)
11568# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11569 do { \
11570 uint16_t u16Tmp; \
11571 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11572 (a_u64Dst) = u16Tmp; \
11573 } while (0)
11574# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11575 do { \
11576 uint32_t u32Tmp; \
11577 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11578 (a_u64Dst) = u32Tmp; \
11579 } while (0)
11580#else /* IEM_WITH_SETJMP */
11581# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11582 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11583# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11584 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11585# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11586 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11587# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11588 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11589# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11590 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11591# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11592 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11593#endif /* IEM_WITH_SETJMP */
11594
11595#ifndef IEM_WITH_SETJMP
11596# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11597 do { \
11598 uint8_t u8Tmp; \
11599 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11600 (a_u16Dst) = (int8_t)u8Tmp; \
11601 } while (0)
11602# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11603 do { \
11604 uint8_t u8Tmp; \
11605 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11606 (a_u32Dst) = (int8_t)u8Tmp; \
11607 } while (0)
11608# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11609 do { \
11610 uint8_t u8Tmp; \
11611 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11612 (a_u64Dst) = (int8_t)u8Tmp; \
11613 } while (0)
11614# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11615 do { \
11616 uint16_t u16Tmp; \
11617 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11618 (a_u32Dst) = (int16_t)u16Tmp; \
11619 } while (0)
11620# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11621 do { \
11622 uint16_t u16Tmp; \
11623 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11624 (a_u64Dst) = (int16_t)u16Tmp; \
11625 } while (0)
11626# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11627 do { \
11628 uint32_t u32Tmp; \
11629 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11630 (a_u64Dst) = (int32_t)u32Tmp; \
11631 } while (0)
11632#else /* IEM_WITH_SETJMP */
11633# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11634 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11635# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11636 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11637# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11638 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11639# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11640 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11641# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11642 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11643# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11644 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11645#endif /* IEM_WITH_SETJMP */
11646
11647#ifndef IEM_WITH_SETJMP
11648# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11649 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11650# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11651 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11652# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11653 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11654# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11655 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11656#else
11657# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11658 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11659# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11660 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11661# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11662 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11663# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11664 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11665#endif
11666
11667#ifndef IEM_WITH_SETJMP
11668# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11669 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11670# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11671 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11672# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11673 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11674# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11675 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11676#else
11677# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11678 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11679# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11680 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11681# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11682 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11683# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11684 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11685#endif
11686
11687#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11688#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11689#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11690#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11691#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11692#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11693#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11694 do { \
11695 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11696 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11697 } while (0)
11698
11699#ifndef IEM_WITH_SETJMP
11700# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11701 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11702# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11703 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11704#else
11705# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11706 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11707# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11708 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11709#endif
11710
11711#ifndef IEM_WITH_SETJMP
11712# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11714# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11715 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11716#else
11717# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11718 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11719# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11720 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11721#endif
11722
11723
11724#define IEM_MC_PUSH_U16(a_u16Value) \
11725 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11726#define IEM_MC_PUSH_U32(a_u32Value) \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11728#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11729 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11730#define IEM_MC_PUSH_U64(a_u64Value) \
11731 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11732
11733#define IEM_MC_POP_U16(a_pu16Value) \
11734 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11735#define IEM_MC_POP_U32(a_pu32Value) \
11736 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11737#define IEM_MC_POP_U64(a_pu64Value) \
11738 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11739
11740/** Maps guest memory for direct or bounce buffered access.
11741 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11742 * @remarks May return.
11743 */
11744#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11745 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11746
11747/** Maps guest memory for direct or bounce buffered access.
11748 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11749 * @remarks May return.
11750 */
11751#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11752 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11753
11754/** Commits the memory and unmaps the guest memory.
11755 * @remarks May return.
11756 */
11757#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11758 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
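/*
 * Hypothetical sketch (added for illustration): the usual map / modify /
 * commit sequence built from the two macros above.  The IEM_ACCESS_DATA_RW
 * flag and the iEffSeg/GCPtrEffDst names are assumptions made for the
 * example.
 */
#if 0
    IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
    *pu16Dst |= 0x0001;                        /* ... operate on the mapped guest memory ... */
    IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
#endif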
11759
11760/** Commits the memory and unmaps the guest memory, unless the FPU status word
11761 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11762 * that would prevent the store from taking place.
11763 *
11764 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11765 * store, while \#P will not.
11766 *
11767 * @remarks May in theory return - for now.
11768 */
11769#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11770 do { \
11771 if ( !(a_u16FSW & X86_FSW_ES) \
11772 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11773 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11774 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11775 } while (0)
11776
11777/** Calculate efficient address from R/M. */
11778#ifndef IEM_WITH_SETJMP
11779# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11780 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11781#else
11782# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11783 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11784#endif
11785
11786#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11787#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11788#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11789#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11790#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11791#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11792#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11793
11794/**
11795 * Defers the rest of the instruction emulation to a C implementation routine
11796 * and returns, only taking the standard parameters.
11797 *
11798 * @param a_pfnCImpl The pointer to the C routine.
11799 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11800 */
11801#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11802
11803/**
11804 * Defers the rest of instruction emulation to a C implementation routine and
11805 * returns, taking one argument in addition to the standard ones.
11806 *
11807 * @param a_pfnCImpl The pointer to the C routine.
11808 * @param a0 The argument.
11809 */
11810#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11811
11812/**
11813 * Defers the rest of the instruction emulation to a C implementation routine
11814 * and returns, taking two arguments in addition to the standard ones.
11815 *
11816 * @param a_pfnCImpl The pointer to the C routine.
11817 * @param a0 The first extra argument.
11818 * @param a1 The second extra argument.
11819 */
11820#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11821
11822/**
11823 * Defers the rest of the instruction emulation to a C implementation routine
11824 * and returns, taking three arguments in addition to the standard ones.
11825 *
11826 * @param a_pfnCImpl The pointer to the C routine.
11827 * @param a0 The first extra argument.
11828 * @param a1 The second extra argument.
11829 * @param a2 The third extra argument.
11830 */
11831#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11832
11833/**
11834 * Defers the rest of the instruction emulation to a C implementation routine
11835 * and returns, taking four arguments in addition to the standard ones.
11836 *
11837 * @param a_pfnCImpl The pointer to the C routine.
11838 * @param a0 The first extra argument.
11839 * @param a1 The second extra argument.
11840 * @param a2 The third extra argument.
11841 * @param a3 The fourth extra argument.
11842 */
11843#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11844
11845/**
11846 * Defers the rest of the instruction emulation to a C implementation routine
11847 * and returns, taking five arguments in addition to the standard ones.
11848 *
11849 * @param a_pfnCImpl The pointer to the C routine.
11850 * @param a0 The first extra argument.
11851 * @param a1 The second extra argument.
11852 * @param a2 The third extra argument.
11853 * @param a3 The fourth extra argument.
11854 * @param a4 The fifth extra argument.
11855 */
11856#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
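
/* Illustrative sketch, not from the original sources: IEM_MC_CALL_CIMPL_N is used as the
 * last statement of an IEM_MC block since it expands to a 'return'.  IEM_MC_BEGIN/END/ARG
 * and IEM_MC_FETCH_GREG_U16 are assumed to be the helpers defined elsewhere in the IEM
 * sources; bRm and iemCImpl_example are hypothetical. */
#if 0 /* example only */
    IEM_MC_BEGIN(1, 0);
    IEM_MC_ARG(uint16_t, u16Sel, 0);
    IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
    IEM_MC_CALL_CIMPL_1(iemCImpl_example, u16Sel); /* returns iemCImpl_example(pVCpu, IEM_GET_INSTR_LEN(pVCpu), u16Sel) */
    IEM_MC_END();
#endif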
11857
11858/**
11859 * Defers the entire instruction emulation to a C implementation routine and
11860 * returns, only taking the standard parameters.
11861 *
11862 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11863 *
11864 * @param a_pfnCImpl The pointer to the C routine.
11865 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11866 */
11867#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11868
11869/**
11870 * Defers the entire instruction emulation to a C implementation routine and
11871 * returns, taking one argument in addition to the standard ones.
11872 *
11873 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11874 *
11875 * @param a_pfnCImpl The pointer to the C routine.
11876 * @param a0 The argument.
11877 */
11878#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11879
11880/**
11881 * Defers the entire instruction emulation to a C implementation routine and
11882 * returns, taking two arguments in addition to the standard ones.
11883 *
11884 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11885 *
11886 * @param a_pfnCImpl The pointer to the C routine.
11887 * @param a0 The first extra argument.
11888 * @param a1 The second extra argument.
11889 */
11890#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11891
11892/**
11893 * Defers the entire instruction emulation to a C implementation routine and
11894 * returns, taking three arguments in addition to the standard ones.
11895 *
11896 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11897 *
11898 * @param a_pfnCImpl The pointer to the C routine.
11899 * @param a0 The first extra argument.
11900 * @param a1 The second extra argument.
11901 * @param a2 The third extra argument.
11902 */
11903#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
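
/* Illustrative sketch, not from the original sources: unlike IEM_MC_CALL_CIMPL_N, the defer
 * variants contain no 'return', so a decoder without an IEM_MC block returns their value
 * directly.  FNIEMOP_DEF is assumed to be the decoder-function definition macro used
 * elsewhere in this file; iemCImpl_example is hypothetical. */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example);
}
#endif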
11904
11905/**
11906 * Calls a FPU assembly implementation taking one visible argument.
11907 *
11908 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11909 * @param a0 The first extra argument.
11910 */
11911#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11912 do { \
11913 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
11914 } while (0)
11915
11916/**
11917 * Calls a FPU assembly implementation taking two visible arguments.
11918 *
11919 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11920 * @param a0 The first extra argument.
11921 * @param a1 The second extra argument.
11922 */
11923#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11924 do { \
11925 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
11926 } while (0)
11927
11928/**
11929 * Calls a FPU assembly implementation taking three visible arguments.
11930 *
11931 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11932 * @param a0 The first extra argument.
11933 * @param a1 The second extra argument.
11934 * @param a2 The third extra argument.
11935 */
11936#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11937 do { \
11938 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11939 } while (0)
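
/* Illustrative sketch, not from the original sources: the usual fadd-st0,stN style pattern,
 * combining the call macro above with the stack-register tests and result-store macros that
 * follow further down.  IEM_MC_BEGIN/END/ARG/LOCAL/ARG_LOCAL_REF, IEM_MC_ADVANCE_RIP, the
 * IEMFPURESULT type and the iemAImpl_fadd_r80_by_r80 worker are assumed to exist as used
 * elsewhere in the IEM sources; bRm is hypothetical. */
#if 0 /* example only */
    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
    /* ... #NM / pending-#MF checks elided ... */
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif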
11940
11941#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11942 do { \
11943 (a_FpuData).FSW = (a_FSW); \
11944 (a_FpuData).r80Result = *(a_pr80Value); \
11945 } while (0)
11946
11947/** Pushes FPU result onto the stack. */
11948#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11949 iemFpuPushResult(pVCpu, &a_FpuData)
11950/** Pushes FPU result onto the stack and sets the FPUDP. */
11951#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11952 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11953
11954/** Replaces ST0 with result value one and pushes result value two onto the FPU stack. */
11955#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11956 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11957
11958/** Stores FPU result in a stack register. */
11959#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11960 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11961/** Stores FPU result in a stack register and pops the stack. */
11962#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11963 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11964/** Stores FPU result in a stack register and sets the FPUDP. */
11965#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11966 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11967/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11968 * stack. */
11969#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11970 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11971
11972/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11973#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11974 iemFpuUpdateOpcodeAndIp(pVCpu)
11975/** Free a stack register (for FFREE and FFREEP). */
11976#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11977 iemFpuStackFree(pVCpu, a_iStReg)
11978/** Increment the FPU stack pointer. */
11979#define IEM_MC_FPU_STACK_INC_TOP() \
11980 iemFpuStackIncTop(pVCpu)
11981/** Decrement the FPU stack pointer. */
11982#define IEM_MC_FPU_STACK_DEC_TOP() \
11983 iemFpuStackDecTop(pVCpu)
11984
11985/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11986#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11987 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11988/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11989#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11990 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11991/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11992#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11993 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11994/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11995#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11996 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11997/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11998 * stack. */
11999#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12000 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12001/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12002#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12003 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12004
12005/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12006#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12007 iemFpuStackUnderflow(pVCpu, a_iStDst)
12008/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12009 * stack. */
12010#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12011 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12012/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12013 * FPUDS. */
12014#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12015 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12016/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12017 * FPUDS. Pops stack. */
12018#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12019 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12020/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12021 * stack twice. */
12022#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12023 iemFpuStackUnderflowThenPopPop(pVCpu)
12024/** Raises a FPU stack underflow exception for an instruction pushing a result
12025 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12026#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12027 iemFpuStackPushUnderflow(pVCpu)
12028/** Raises a FPU stack underflow exception for an instruction pushing a result
12029 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12030#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12031 iemFpuStackPushUnderflowTwo(pVCpu)
12032
12033/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12034 * FPUIP, FPUCS and FOP. */
12035#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12036 iemFpuStackPushOverflow(pVCpu)
12037/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12038 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12039#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12040 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12041/** Prepares for using the FPU state.
12042 * Ensures that we can use the host FPU in the current context (RC+R0).
12043 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12044#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12045/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12046#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12047/** Actualizes the guest FPU state so it can be accessed and modified. */
12048#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12049
12050/** Prepares for using the SSE state.
12051 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12052 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12053#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12054/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12055#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12056/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12057#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12058
12059/** Prepares for using the AVX state.
12060 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12061 * Ensures the guest AVX state in the CPUMCTX is up to date.
12062 * @note This will include the AVX512 state too when support for it is added
12063 * due to the zero-extending feature of VEX instructions. */
12064#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12065/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12066#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12067/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12068#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12069
12070/**
12071 * Calls a MMX assembly implementation taking two visible arguments.
12072 *
12073 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12074 * @param a0 The first extra argument.
12075 * @param a1 The second extra argument.
12076 */
12077#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12078 do { \
12079 IEM_MC_PREPARE_FPU_USAGE(); \
12080 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12081 } while (0)
12082
12083/**
12084 * Calls a MMX assembly implementation taking three visible arguments.
12085 *
12086 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12087 * @param a0 The first extra argument.
12088 * @param a1 The second extra argument.
12089 * @param a2 The third extra argument.
12090 */
12091#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12092 do { \
12093 IEM_MC_PREPARE_FPU_USAGE(); \
12094 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12095 } while (0)
12096
12097
12098/**
12099 * Calls a SSE assembly implementation taking two visible arguments.
12100 *
12101 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12102 * @param a0 The first extra argument.
12103 * @param a1 The second extra argument.
12104 */
12105#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12106 do { \
12107 IEM_MC_PREPARE_SSE_USAGE(); \
12108 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12109 } while (0)
12110
12111/**
12112 * Calls a SSE assembly implementation taking three visible arguments.
12113 *
12114 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12115 * @param a0 The first extra argument.
12116 * @param a1 The second extra argument.
12117 * @param a2 The third extra argument.
12118 */
12119#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12120 do { \
12121 IEM_MC_PREPARE_SSE_USAGE(); \
12122 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12123 } while (0)
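
/* Illustrative sketch, not from the original sources: a register,register SSE form as it is
 * typically written in the decoders.  IEM_MC_BEGIN/END/ARG, IEM_MC_ADVANCE_RIP and the
 * IEM_MC_REF_XREG_U128[_CONST] helpers are assumed to be defined elsewhere in the IEM
 * sources; bRm and iemAImpl_example_u128 are hypothetical. */
#if 0 /* example only */
    IEM_MC_BEGIN(2, 0);
    IEM_MC_ARG(PRTUINT128U,  pDst, 0);
    IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
    /* ... CR0/CR4/CPUID related exception checks elided ... */
    IEM_MC_PREPARE_SSE_USAGE();
    IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
    IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
    IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_example_u128, pDst, pSrc);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif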
12124
12125
12126/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12127 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12128#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12129 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12130
12131/**
12132 * Calls a AVX assembly implementation taking two visible arguments.
12133 *
12134 * There is one implicit zero'th argument, a pointer to the extended state.
12135 *
12136 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12137 * @param a1 The first extra argument.
12138 * @param a2 The second extra argument.
12139 */
12140#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12141 do { \
12142 IEM_MC_PREPARE_AVX_USAGE(); \
12143 a_pfnAImpl(pXState, (a1), (a2)); \
12144 } while (0)
12145
12146/**
12147 * Calls a AVX assembly implementation taking three visible arguments.
12148 *
12149 * There is one implicit zero'th argument, a pointer to the extended state.
12150 *
12151 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12152 * @param a1 The first extra argument.
12153 * @param a2 The second extra argument.
12154 * @param a3 The third extra argument.
12155 */
12156#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12157 do { \
12158 IEM_MC_PREPARE_AVX_USAGE(); \
12159 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12160 } while (0)
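
/* Illustrative sketch, not from the original sources: the AVX call macros expect
 * IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() to have declared the implicit 'pXState' argument in
 * slot 0, so the visible arguments start at slot 1.  IEM_MC_ARG is assumed to be the
 * argument-declaration helper defined elsewhere in the IEM sources; the operand setup and
 * the iemAImpl_example_u256 worker are hypothetical. */
#if 0 /* example fragment only */
    IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();                   /* declares 'pXState' as argument 0 */
    IEM_MC_ARG(PRTUINT256U,  puDst, 1);                 /* visible arguments start at index 1 */
    IEM_MC_ARG(PCRTUINT256U, puSrc, 2);
    /* ... operand setup elided ... */
    IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_example_u256, puDst, puSrc); /* does IEM_MC_PREPARE_AVX_USAGE() itself */
#endif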
12161
12162/** @note Not for IOPL or IF testing. */
12163#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12164/** @note Not for IOPL or IF testing. */
12165#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12166/** @note Not for IOPL or IF testing. */
12167#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12168/** @note Not for IOPL or IF testing. */
12169#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12170/** @note Not for IOPL or IF testing. */
12171#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12172 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12173 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12174/** @note Not for IOPL or IF testing. */
12175#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12176 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12177 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12178/** @note Not for IOPL or IF testing. */
12179#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12180 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12181 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12182 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12183/** @note Not for IOPL or IF testing. */
12184#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12185 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12186 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12187 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12188#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12189#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12190#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12191/** @note Not for IOPL or IF testing. */
12192#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12193 if ( pVCpu->cpum.GstCtx.cx != 0 \
12194 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12195/** @note Not for IOPL or IF testing. */
12196#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12197 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12198 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12199/** @note Not for IOPL or IF testing. */
12200#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12201 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12202 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12203/** @note Not for IOPL or IF testing. */
12204#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12205 if ( pVCpu->cpum.GstCtx.cx != 0 \
12206 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12207/** @note Not for IOPL or IF testing. */
12208#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12209 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12210 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12211/** @note Not for IOPL or IF testing. */
12212#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12213 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12214 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12215#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12216#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12217
12218#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12219 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12220#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12221 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12222#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12223 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12224#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12225 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12226#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12227 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12228#define IEM_MC_IF_FCW_IM() \
12229 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12230
12231#define IEM_MC_ELSE() } else {
12232#define IEM_MC_ENDIF() } do {} while (0)
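
/* Illustrative sketch, not from the original sources: each IEM_MC_IF_* macro above opens a
 * brace that IEM_MC_ELSE()/IEM_MC_ENDIF() continue and close, so every IF must be paired
 * with an ENDIF.  A Jcc-style body might look like this; IEM_MC_BEGIN/END, IEM_MC_ADVANCE_RIP
 * and IEM_MC_REL_JMP_S8 are assumed to be the helpers defined elsewhere in the IEM sources,
 * and i8Imm is a hypothetical, previously fetched displacement. */
#if 0 /* example only */
    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
        IEM_MC_REL_JMP_S8(i8Imm);   /* branch taken */
    IEM_MC_ELSE()
        IEM_MC_ADVANCE_RIP();       /* branch not taken */
    IEM_MC_ENDIF();
    IEM_MC_END();
#endif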
12233
12234/** @} */
12235
12236
12237/** @name Opcode Debug Helpers.
12238 * @{
12239 */
12240#ifdef VBOX_WITH_STATISTICS
12241# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12242#else
12243# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12244#endif
12245
12246#ifdef DEBUG
12247# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12248 do { \
12249 IEMOP_INC_STATS(a_Stats); \
12250 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12251 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12252 } while (0)
12253
12254# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12255 do { \
12256 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12257 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12258 (void)RT_CONCAT(OP_,a_Upper); \
12259 (void)(a_fDisHints); \
12260 (void)(a_fIemHints); \
12261 } while (0)
12262
12263# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12264 do { \
12265 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12266 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12267 (void)RT_CONCAT(OP_,a_Upper); \
12268 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12269 (void)(a_fDisHints); \
12270 (void)(a_fIemHints); \
12271 } while (0)
12272
12273# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12274 do { \
12275 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12276 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12277 (void)RT_CONCAT(OP_,a_Upper); \
12278 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12279 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12280 (void)(a_fDisHints); \
12281 (void)(a_fIemHints); \
12282 } while (0)
12283
12284# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12285 do { \
12286 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12287 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12288 (void)RT_CONCAT(OP_,a_Upper); \
12289 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12290 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12291 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12292 (void)(a_fDisHints); \
12293 (void)(a_fIemHints); \
12294 } while (0)
12295
12296# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12297 do { \
12298 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12299 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12300 (void)RT_CONCAT(OP_,a_Upper); \
12301 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12302 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12303 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12304 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12305 (void)(a_fDisHints); \
12306 (void)(a_fIemHints); \
12307 } while (0)
12308
12309#else
12310# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12311
12312# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12313 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12314# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12315 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12316# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12317 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12318# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12319 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12320# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12321 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12322
12323#endif
12324
12325#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12326 IEMOP_MNEMONIC0EX(a_Lower, \
12327 #a_Lower, \
12328 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12329#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12330 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12331 #a_Lower " " #a_Op1, \
12332 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12333#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12334 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12335 #a_Lower " " #a_Op1 "," #a_Op2, \
12336 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12337#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12338 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12339 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12340 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12341#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12342 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12343 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12344 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
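
/* Illustrative sketch, not from the original sources: the wrappers above paste together the
 * statistics member name (e.g. add_Eb_Gb) and the mnemonic string from the operand tokens,
 * while the (void) casts in the *EX variants merely compile-time check that the matching
 * IEMOPFORM_*, OP_* and OP_PARM_* constants exist.  A two-operand decoder would typically
 * start out as below; the MR/ADD/Eb/Gb/DISOPTYPE_HARMLESS constants are assumed to be the
 * ones used elsewhere in the IEM and DIS sources. */
#if 0 /* example only */
    IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
#endif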
12345
12346/** @} */
12347
12348
12349/** @name Opcode Helpers.
12350 * @{
12351 */
12352
12353#ifdef IN_RING3
12354# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12355 do { \
12356 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12357 else \
12358 { \
12359 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12360 return IEMOP_RAISE_INVALID_OPCODE(); \
12361 } \
12362 } while (0)
12363#else
12364# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12365 do { \
12366 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12367 else return IEMOP_RAISE_INVALID_OPCODE(); \
12368 } while (0)
12369#endif
12370
12371/** The instruction requires a 186 or later. */
12372#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12373# define IEMOP_HLP_MIN_186() do { } while (0)
12374#else
12375# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12376#endif
12377
12378/** The instruction requires a 286 or later. */
12379#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12380# define IEMOP_HLP_MIN_286() do { } while (0)
12381#else
12382# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12383#endif
12384
12385/** The instruction requires a 386 or later. */
12386#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12387# define IEMOP_HLP_MIN_386() do { } while (0)
12388#else
12389# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12390#endif
12391
12392/** The instruction requires a 386 or later if the given expression is true. */
12393#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12394# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12395#else
12396# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12397#endif
12398
12399/** The instruction requires a 486 or later. */
12400#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12401# define IEMOP_HLP_MIN_486() do { } while (0)
12402#else
12403# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12404#endif
12405
12406/** The instruction requires a Pentium (586) or later. */
12407#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12408# define IEMOP_HLP_MIN_586() do { } while (0)
12409#else
12410# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12411#endif
12412
12413/** The instruction requires a PentiumPro (686) or later. */
12414#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12415# define IEMOP_HLP_MIN_686() do { } while (0)
12416#else
12417# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12418#endif
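
/* Illustrative sketch, not from the original sources: the IEMOP_HLP_MIN_* checks sit right
 * after the mnemonic at the top of a decoder, raising #UD when the configured target CPU is
 * too old and compiling away entirely when IEM_CFG_TARGET_CPU is new enough.  FNIEMOP_DEF is
 * assumed to be the decoder-function definition macro used elsewhere in this file; the
 * 'example' statistics member and iemCImpl_example are hypothetical. */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example)
{
    IEMOP_MNEMONIC(example, "example");
    IEMOP_HLP_MIN_386();                            /* #UD on 8086/186/286 targets */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example);
}
#endif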
12419
12420
12421/** The instruction raises an \#UD in real and V8086 mode. */
12422#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12423 do \
12424 { \
12425 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12426 else return IEMOP_RAISE_INVALID_OPCODE(); \
12427 } while (0)
12428
12429/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12430 * 64-bit mode. */
12431#define IEMOP_HLP_NO_64BIT() \
12432 do \
12433 { \
12434 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12435 return IEMOP_RAISE_INVALID_OPCODE(); \
12436 } while (0)
12437
12438/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12439 * 64-bit mode. */
12440#define IEMOP_HLP_ONLY_64BIT() \
12441 do \
12442 { \
12443 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12444 return IEMOP_RAISE_INVALID_OPCODE(); \
12445 } while (0)
12446
12447/** The instruction defaults to 64-bit operand size in 64-bit mode. */
12448#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12449 do \
12450 { \
12451 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12452 iemRecalEffOpSize64Default(pVCpu); \
12453 } while (0)
12454
12455/** The instruction has 64-bit operand size in 64-bit mode. */
12456#define IEMOP_HLP_64BIT_OP_SIZE() \
12457 do \
12458 { \
12459 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12460 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12461 } while (0)
12462
12463/** Only a REX prefix immediately preceding the first opcode byte takes
12464 * effect. This macro helps ensure this and logs bad guest code. */
12465#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12466 do \
12467 { \
12468 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12469 { \
12470 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12471 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12472 pVCpu->iem.s.uRexB = 0; \
12473 pVCpu->iem.s.uRexIndex = 0; \
12474 pVCpu->iem.s.uRexReg = 0; \
12475 iemRecalEffOpSize(pVCpu); \
12476 } \
12477 } while (0)
12478
12479/**
12480 * Done decoding.
12481 */
12482#define IEMOP_HLP_DONE_DECODING() \
12483 do \
12484 { \
12485 /*nothing for now, maybe later... */ \
12486 } while (0)
12487
12488/**
12489 * Done decoding, raise \#UD exception if lock prefix present.
12490 */
12491#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12492 do \
12493 { \
12494 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12495 { /* likely */ } \
12496 else \
12497 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12498 } while (0)
12499
12500
12501/**
12502 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12503 * repnz or size prefixes are present, or if in real or v8086 mode.
12504 */
12505#define IEMOP_HLP_DONE_VEX_DECODING() \
12506 do \
12507 { \
12508 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12509 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12510 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12511 { /* likely */ } \
12512 else \
12513 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12514 } while (0)
12515
12516/**
12517 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12518 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12519 */
12520#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12521 do \
12522 { \
12523 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12524 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12525 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12526 && pVCpu->iem.s.uVexLength == 0)) \
12527 { /* likely */ } \
12528 else \
12529 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12530 } while (0)
12531
12532
12533/**
12534 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12535 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12536 * register 0, or if in real or v8086 mode.
12537 */
12538#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12539 do \
12540 { \
12541 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12542 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12543 && !pVCpu->iem.s.uVex3rdReg \
12544 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12545 { /* likely */ } \
12546 else \
12547 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12548 } while (0)
12549
12550/**
12551 * Done decoding VEX, no V, L=0.
12552 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12553 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12554 */
12555#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12556 do \
12557 { \
12558 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12559 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12560 && pVCpu->iem.s.uVexLength == 0 \
12561 && pVCpu->iem.s.uVex3rdReg == 0 \
12562 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12563 { /* likely */ } \
12564 else \
12565 return IEMOP_RAISE_INVALID_OPCODE(); \
12566 } while (0)
12567
12568#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12569 do \
12570 { \
12571 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12572 { /* likely */ } \
12573 else \
12574 { \
12575 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12576 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12577 } \
12578 } while (0)
12579#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12580 do \
12581 { \
12582 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12583 { /* likely */ } \
12584 else \
12585 { \
12586 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12587 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12588 } \
12589 } while (0)
12590
12591/**
12592 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12593 * are present.
12594 */
12595#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12596 do \
12597 { \
12598 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12599 { /* likely */ } \
12600 else \
12601 return IEMOP_RAISE_INVALID_OPCODE(); \
12602 } while (0)
12603
12604
12605/**
12606 * Calculates the effective address of a ModR/M memory operand.
12607 *
12608 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12609 *
12610 * @return Strict VBox status code.
12611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12612 * @param bRm The ModRM byte.
12613 * @param cbImm The size of any immediate following the
12614 * effective address opcode bytes. Important for
12615 * RIP relative addressing.
12616 * @param pGCPtrEff Where to return the effective address.
12617 */
12618IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12619{
12620 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12621# define SET_SS_DEF() \
12622 do \
12623 { \
12624 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12625 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12626 } while (0)
12627
12628 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12629 {
12630/** @todo Check the effective address size crap! */
12631 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12632 {
12633 uint16_t u16EffAddr;
12634
12635 /* Handle the disp16 form with no registers first. */
12636 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12637 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12638 else
12639 {
12640                /* Get the displacement. */
12641 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12642 {
12643 case 0: u16EffAddr = 0; break;
12644 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12645 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12646 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12647 }
12648
12649 /* Add the base and index registers to the disp. */
12650 switch (bRm & X86_MODRM_RM_MASK)
12651 {
12652 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12653 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12654 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12655 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12656 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12657 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12658 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12659 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12660 }
12661 }
12662
12663 *pGCPtrEff = u16EffAddr;
12664 }
12665 else
12666 {
12667 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12668 uint32_t u32EffAddr;
12669
12670 /* Handle the disp32 form with no registers first. */
12671 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12672 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12673 else
12674 {
12675 /* Get the register (or SIB) value. */
12676 switch ((bRm & X86_MODRM_RM_MASK))
12677 {
12678 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12679 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12680 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12681 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12682 case 4: /* SIB */
12683 {
12684 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12685
12686 /* Get the index and scale it. */
12687 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12688 {
12689 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12690 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12691 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12692 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12693 case 4: u32EffAddr = 0; /*none */ break;
12694 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12695 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12696 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12697 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12698 }
12699 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12700
12701 /* add base */
12702 switch (bSib & X86_SIB_BASE_MASK)
12703 {
12704 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12705 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12706 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12707 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12708 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12709 case 5:
12710 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12711 {
12712 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12713 SET_SS_DEF();
12714 }
12715 else
12716 {
12717 uint32_t u32Disp;
12718 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12719 u32EffAddr += u32Disp;
12720 }
12721 break;
12722 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12723 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12724 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12725 }
12726 break;
12727 }
12728 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12729 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12730 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12731 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12732 }
12733
12734 /* Get and add the displacement. */
12735 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12736 {
12737 case 0:
12738 break;
12739 case 1:
12740 {
12741 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12742 u32EffAddr += i8Disp;
12743 break;
12744 }
12745 case 2:
12746 {
12747 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12748 u32EffAddr += u32Disp;
12749 break;
12750 }
12751 default:
12752 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12753 }
12754
12755 }
12756 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12757 *pGCPtrEff = u32EffAddr;
12758 else
12759 {
12760 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12761 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12762 }
12763 }
12764 }
12765 else
12766 {
12767 uint64_t u64EffAddr;
12768
12769 /* Handle the rip+disp32 form with no registers first. */
12770 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12771 {
12772 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12773 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12774 }
12775 else
12776 {
12777 /* Get the register (or SIB) value. */
12778 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12779 {
12780 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12781 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12782 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12783 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12784 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
12785 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12786 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12787 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12788 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12789 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12790 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12791 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12792 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12793 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12794 /* SIB */
12795 case 4:
12796 case 12:
12797 {
12798 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12799
12800 /* Get the index and scale it. */
12801 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12802 {
12803 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12804 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12805 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12806 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12807 case 4: u64EffAddr = 0; /*none */ break;
12808 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
12809 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12810 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12811 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12812 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12813 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12814 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12815 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
12816 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12817 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12818 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12819 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12820 }
12821 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12822
12823 /* add base */
12824 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12825 {
12826 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
12827 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
12828 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
12829 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
12830 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
12831 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
12832 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
12833 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
12834 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
12835 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
12836 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
12837 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
12838 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
12839 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
12840 /* complicated encodings */
12841 case 5:
12842 case 13:
12843 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12844 {
12845 if (!pVCpu->iem.s.uRexB)
12846 {
12847 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
12848 SET_SS_DEF();
12849 }
12850 else
12851 u64EffAddr += pVCpu->cpum.GstCtx.r13;
12852 }
12853 else
12854 {
12855 uint32_t u32Disp;
12856 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12857 u64EffAddr += (int32_t)u32Disp;
12858 }
12859 break;
12860 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12861 }
12862 break;
12863 }
12864 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12865 }
12866
12867 /* Get and add the displacement. */
12868 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12869 {
12870 case 0:
12871 break;
12872 case 1:
12873 {
12874 int8_t i8Disp;
12875 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12876 u64EffAddr += i8Disp;
12877 break;
12878 }
12879 case 2:
12880 {
12881 uint32_t u32Disp;
12882 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12883 u64EffAddr += (int32_t)u32Disp;
12884 break;
12885 }
12886 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12887 }
12888
12889 }
12890
12891 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12892 *pGCPtrEff = u64EffAddr;
12893 else
12894 {
12895 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12896 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12897 }
12898 }
12899
12900 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12901 return VINF_SUCCESS;
12902}
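
/* Worked example, not from the original sources: with 32-bit addressing, bRm=0x44 decodes as
 * mod=01b rm=100b, so a SIB byte and a disp8 follow.  A SIB byte of 0x88 gives scale=10b (x4),
 * index=001b (ECX) and base=000b (EAX), so the routine above returns
 * GCPtrEff = EAX + ECX*4 + disp8 with DS as the default segment. */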
12903
12904
12905/**
12906 * Calculates the effective address of a ModR/M memory operand.
12907 *
12908 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12909 *
12910 * @return Strict VBox status code.
12911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12912 * @param bRm The ModRM byte.
12913 * @param cbImm The size of any immediate following the
12914 * effective address opcode bytes. Important for
12915 * RIP relative addressing.
12916 * @param pGCPtrEff Where to return the effective address.
12917 * @param offRsp RSP displacement.
12918 */
12919IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12920{
12921    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
12922# define SET_SS_DEF() \
12923 do \
12924 { \
12925 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12926 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12927 } while (0)
12928
12929 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12930 {
12931/** @todo Check the effective address size crap! */
12932 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12933 {
12934 uint16_t u16EffAddr;
12935
12936 /* Handle the disp16 form with no registers first. */
12937 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12938 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12939 else
12940 {
12941                /* Get the displacement. */
12942 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12943 {
12944 case 0: u16EffAddr = 0; break;
12945 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12946 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12947 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12948 }
12949
12950 /* Add the base and index registers to the disp. */
12951 switch (bRm & X86_MODRM_RM_MASK)
12952 {
12953 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12954 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12955 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12956 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12957 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12958 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12959 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12960 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12961 }
12962 }
12963
12964 *pGCPtrEff = u16EffAddr;
12965 }
12966 else
12967 {
12968 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12969 uint32_t u32EffAddr;
12970
12971 /* Handle the disp32 form with no registers first. */
12972 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12973 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12974 else
12975 {
12976 /* Get the register (or SIB) value. */
12977 switch ((bRm & X86_MODRM_RM_MASK))
12978 {
12979 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12980 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12981 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12982 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12983 case 4: /* SIB */
12984 {
12985 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12986
12987 /* Get the index and scale it. */
12988 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12989 {
12990 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12991 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12992 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12993 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12994 case 4: u32EffAddr = 0; /*none */ break;
12995 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12996 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12997 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12998 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12999 }
13000 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13001
13002 /* add base */
13003 switch (bSib & X86_SIB_BASE_MASK)
13004 {
13005 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13006 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13007 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13008 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13009 case 4:
13010 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13011 SET_SS_DEF();
13012 break;
13013 case 5:
13014 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13015 {
13016 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13017 SET_SS_DEF();
13018 }
13019 else
13020 {
13021 uint32_t u32Disp;
13022 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13023 u32EffAddr += u32Disp;
13024 }
13025 break;
13026 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13027 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13028 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13029 }
13030 break;
13031 }
13032 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13033 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13034 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13035 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13036 }
13037
13038 /* Get and add the displacement. */
13039 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13040 {
13041 case 0:
13042 break;
13043 case 1:
13044 {
13045 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13046 u32EffAddr += i8Disp;
13047 break;
13048 }
13049 case 2:
13050 {
13051 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13052 u32EffAddr += u32Disp;
13053 break;
13054 }
13055 default:
13056 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13057 }
13058
13059 }
13060 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13061 *pGCPtrEff = u32EffAddr;
13062 else
13063 {
13064 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13065 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13066 }
13067 }
13068 }
13069 else
13070 {
13071 uint64_t u64EffAddr;
13072
13073 /* Handle the rip+disp32 form with no registers first. */
13074 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13075 {
13076 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13077 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13078 }
13079 else
13080 {
13081 /* Get the register (or SIB) value. */
13082 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13083 {
13084 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13085 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13086 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13087 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13088 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13089 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13090 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13091 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13092 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13093 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13094 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13095 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13096 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13097 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13098 /* SIB */
13099 case 4:
13100 case 12:
13101 {
13102 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13103
13104 /* Get the index and scale it. */
13105 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13106 {
13107 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13108 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13109 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13110 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13111 case 4: u64EffAddr = 0; /*none */ break;
13112 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13113 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13114 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13115 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13116 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13117 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13118 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13119 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13120 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13121 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13122 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13123 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13124 }
13125 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13126
13127 /* add base */
13128 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13129 {
13130 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13131 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13132 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13133 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13134 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13135 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13136 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13137 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13138 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13139 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13140 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13141 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13142 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13143 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13144 /* complicated encodings */
13145 case 5:
13146 case 13:
13147 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13148 {
13149 if (!pVCpu->iem.s.uRexB)
13150 {
13151 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13152 SET_SS_DEF();
13153 }
13154 else
13155 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13156 }
13157 else
13158 {
13159 uint32_t u32Disp;
13160 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13161 u64EffAddr += (int32_t)u32Disp;
13162 }
13163 break;
13164 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13165 }
13166 break;
13167 }
13168 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13169 }
13170
13171 /* Get and add the displacement. */
13172 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13173 {
13174 case 0:
13175 break;
13176 case 1:
13177 {
13178 int8_t i8Disp;
13179 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13180 u64EffAddr += i8Disp;
13181 break;
13182 }
13183 case 2:
13184 {
13185 uint32_t u32Disp;
13186 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13187 u64EffAddr += (int32_t)u32Disp;
13188 break;
13189 }
13190 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13191 }
13192
13193 }
13194
13195 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13196 *pGCPtrEff = u64EffAddr;
13197 else
13198 {
13199 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13200 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13201 }
13202 }
13203
13204 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13205 return VINF_SUCCESS;
13206}
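/*
 * Worked example (illustrative only): with 16-bit addressing, ModRM=0x42 (mod=01, rm=010,
 * i.e. [bp+si]+disp8) and disp8=0x08 decode to the effective address BP + SI + 0x08, with
 * SS as the default segment because BP is involved.
 */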
13207
13208
13209#ifdef IEM_WITH_SETJMP
13210/**
13211 * Calculates the effective address of a ModR/M memory operand.
13212 *
13213 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13214 *
13215 * May longjmp on internal error.
13216 *
13217 * @return The effective address.
13218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13219 * @param bRm The ModRM byte.
13220 * @param cbImm The size of any immediate following the
13221 * effective address opcode bytes. Important for
13222 * RIP relative addressing.
13223 */
13224IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13225{
13226 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13227# define SET_SS_DEF() \
13228 do \
13229 { \
13230 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13231 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13232 } while (0)
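 /* Operands addressed via BP/EBP/RBP or SP/ESP/RSP default to the SS segment; e.g. a
    [bp+disp8] operand uses SS unless a segment override prefix is present. */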
13233
13234 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13235 {
13236/** @todo Check the effective address size crap! */
13237 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13238 {
13239 uint16_t u16EffAddr;
13240
13241 /* Handle the disp16 form with no registers first. */
13242 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13243 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13244 else
13245 {
13246 /* Get the displacement. */
13247 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13248 {
13249 case 0: u16EffAddr = 0; break;
13250 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13251 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13252 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13253 }
13254
13255 /* Add the base and index registers to the disp. */
13256 switch (bRm & X86_MODRM_RM_MASK)
13257 {
13258 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13259 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13260 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13261 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13262 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13263 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13264 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13265 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13266 }
13267 }
13268
13269 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13270 return u16EffAddr;
13271 }
13272
13273 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13274 uint32_t u32EffAddr;
13275
13276 /* Handle the disp32 form with no registers first. */
13277 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13278 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13279 else
13280 {
13281 /* Get the register (or SIB) value. */
13282 switch ((bRm & X86_MODRM_RM_MASK))
13283 {
13284 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13285 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13286 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13287 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13288 case 4: /* SIB */
13289 {
13290 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13291
13292 /* Get the index and scale it. */
13293 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13294 {
13295 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13296 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13297 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13298 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13299 case 4: u32EffAddr = 0; /*none */ break;
13300 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13301 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13302 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13303 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13304 }
13305 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13306
13307 /* add base */
13308 switch (bSib & X86_SIB_BASE_MASK)
13309 {
13310 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13311 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13312 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13313 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13314 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13315 case 5:
13316 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13317 {
13318 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13319 SET_SS_DEF();
13320 }
13321 else
13322 {
13323 uint32_t u32Disp;
13324 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13325 u32EffAddr += u32Disp;
13326 }
13327 break;
13328 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13329 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13330 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13331 }
13332 break;
13333 }
13334 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13335 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13336 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13337 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13338 }
13339
13340 /* Get and add the displacement. */
13341 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13342 {
13343 case 0:
13344 break;
13345 case 1:
13346 {
13347 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13348 u32EffAddr += i8Disp;
13349 break;
13350 }
13351 case 2:
13352 {
13353 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13354 u32EffAddr += u32Disp;
13355 break;
13356 }
13357 default:
13358 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13359 }
13360 }
13361
13362 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13363 {
13364 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13365 return u32EffAddr;
13366 }
13367 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13368 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13369 return u32EffAddr & UINT16_MAX;
13370 }
13371
13372 uint64_t u64EffAddr;
13373
13374 /* Handle the rip+disp32 form with no registers first. */
13375 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13376 {
13377 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13378 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13379 }
13380 else
13381 {
13382 /* Get the register (or SIB) value. */
13383 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13384 {
13385 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13386 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13387 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13388 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13389 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13390 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13391 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13392 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13393 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13394 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13395 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13396 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13397 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13398 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13399 /* SIB */
13400 case 4:
13401 case 12:
13402 {
13403 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13404
13405 /* Get the index and scale it. */
13406 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13407 {
13408 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13409 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13410 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13411 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13412 case 4: u64EffAddr = 0; /*none */ break;
13413 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13414 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13415 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13416 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13417 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13418 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13419 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13420 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13421 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13422 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13423 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13424 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13425 }
13426 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13427
13428 /* add base */
13429 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13430 {
13431 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13432 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13433 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13434 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13435 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13436 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13437 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13438 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13439 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13440 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13441 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13442 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13443 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13444 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13445 /* complicated encodings */
13446 case 5:
13447 case 13:
13448 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13449 {
13450 if (!pVCpu->iem.s.uRexB)
13451 {
13452 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13453 SET_SS_DEF();
13454 }
13455 else
13456 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13457 }
13458 else
13459 {
13460 uint32_t u32Disp;
13461 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13462 u64EffAddr += (int32_t)u32Disp;
13463 }
13464 break;
13465 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13466 }
13467 break;
13468 }
13469 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13470 }
13471
13472 /* Get and add the displacement. */
13473 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13474 {
13475 case 0:
13476 break;
13477 case 1:
13478 {
13479 int8_t i8Disp;
13480 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13481 u64EffAddr += i8Disp;
13482 break;
13483 }
13484 case 2:
13485 {
13486 uint32_t u32Disp;
13487 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13488 u64EffAddr += (int32_t)u32Disp;
13489 break;
13490 }
13491 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13492 }
13493
13494 }
13495
13496 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13497 {
13498 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13499 return u64EffAddr;
13500 }
13501 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13502 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13503 return u64EffAddr & UINT32_MAX;
13504}
13505#endif /* IEM_WITH_SETJMP */
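/*
 * Worked example (illustrative only): with 32-bit addressing, ModRM=0x44 (mod=01, rm=100,
 * i.e. SIB byte plus disp8), SIB=0x88 (scale=4, index=ECX, base=EAX) and disp8=0x10 decode
 * to the effective address EAX + ECX*4 + 0x10, with DS as the default segment.
 */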
13506
13507/** @} */
13508
13509
13510
13511/*
13512 * Include the instructions
13513 */
13514#include "IEMAllInstructions.cpp.h"
13515
13516
13517
13518#ifdef LOG_ENABLED
13519/**
13520 * Logs the current instruction.
13521 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13522 * @param fSameCtx Set if we have the same context information as the VMM,
13523 * clear if we may have already executed an instruction in
13524 * our debug context. When clear, we assume IEMCPU holds
13525 * valid CPU mode info.
13526 *
13527 * The @a fSameCtx parameter is now misleading and obsolete.
13528 * @param pszFunction The IEM function doing the execution.
13529 */
13530IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13531{
13532# ifdef IN_RING3
13533 if (LogIs2Enabled())
13534 {
13535 char szInstr[256];
13536 uint32_t cbInstr = 0;
13537 if (fSameCtx)
13538 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13539 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13540 szInstr, sizeof(szInstr), &cbInstr);
13541 else
13542 {
13543 uint32_t fFlags = 0;
13544 switch (pVCpu->iem.s.enmCpuMode)
13545 {
13546 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13547 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13548 case IEMMODE_16BIT:
13549 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13550 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13551 else
13552 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13553 break;
13554 }
13555 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13556 szInstr, sizeof(szInstr), &cbInstr);
13557 }
13558
13559 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13560 Log2(("**** %s\n"
13561 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13562 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13563 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13564 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13565 " %s\n"
13566 , pszFunction,
13567 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13568 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13569 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13570 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13571 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13572 szInstr));
13573
13574 if (LogIs3Enabled())
13575 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13576 }
13577 else
13578# endif
13579 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13580 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13581 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13582}
13583#endif /* LOG_ENABLED */
13584
13585
13586/**
13587 * Makes status code adjustments (pass up from I/O and access handlers)
13588 * and maintains statistics.
13589 *
13590 * @returns Strict VBox status code to pass up.
13591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13592 * @param rcStrict The status from executing an instruction.
13593 */
13594DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13595{
13596 if (rcStrict != VINF_SUCCESS)
13597 {
13598 if (RT_SUCCESS(rcStrict))
13599 {
13600 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13601 || rcStrict == VINF_IOM_R3_IOPORT_READ
13602 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13603 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13604 || rcStrict == VINF_IOM_R3_MMIO_READ
13605 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13606 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13607 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13608 || rcStrict == VINF_CPUM_R3_MSR_READ
13609 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13610 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13611 || rcStrict == VINF_EM_RAW_TO_R3
13612 || rcStrict == VINF_EM_TRIPLE_FAULT
13613 || rcStrict == VINF_GIM_R3_HYPERCALL
13614 /* raw-mode / virt handlers only: */
13615 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13616 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13617 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13618 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13619 || rcStrict == VINF_SELM_SYNC_GDT
13620 || rcStrict == VINF_CSAM_PENDING_ACTION
13621 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13622 /* nested hw.virt codes: */
13623 || rcStrict == VINF_SVM_VMEXIT
13624 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13625/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13626 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13627#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13628 if ( rcStrict == VINF_SVM_VMEXIT
13629 && rcPassUp == VINF_SUCCESS)
13630 rcStrict = VINF_SUCCESS;
13631 else
13632#endif
13633 if (rcPassUp == VINF_SUCCESS)
13634 pVCpu->iem.s.cRetInfStatuses++;
13635 else if ( rcPassUp < VINF_EM_FIRST
13636 || rcPassUp > VINF_EM_LAST
13637 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13638 {
13639 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13640 pVCpu->iem.s.cRetPassUpStatus++;
13641 rcStrict = rcPassUp;
13642 }
13643 else
13644 {
13645 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13646 pVCpu->iem.s.cRetInfStatuses++;
13647 }
13648 }
13649 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13650 pVCpu->iem.s.cRetAspectNotImplemented++;
13651 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13652 pVCpu->iem.s.cRetInstrNotImplemented++;
13653 else
13654 pVCpu->iem.s.cRetErrStatuses++;
13655 }
13656 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13657 {
13658 pVCpu->iem.s.cRetPassUpStatus++;
13659 rcStrict = pVCpu->iem.s.rcPassUp;
13660 }
13661
13662 return rcStrict;
13663}
13664
13665
13666/**
13667 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13668 * IEMExecOneWithPrefetchedByPC.
13669 *
13670 * Similar code is found in IEMExecLots.
13671 *
13672 * @return Strict VBox status code.
13673 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13674 * @param fExecuteInhibit If set, execute the instruction following CLI,
13675 * POP SS and MOV SS,GR.
13676 * @param pszFunction The calling function name.
13677 */
13678DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13679{
13680 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13681 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13682 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13683 RT_NOREF_PV(pszFunction);
13684
13685#ifdef IEM_WITH_SETJMP
13686 VBOXSTRICTRC rcStrict;
13687 jmp_buf JmpBuf;
13688 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13689 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13690 if ((rcStrict = setjmp(JmpBuf)) == 0)
13691 {
13692 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13693 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13694 }
13695 else
13696 pVCpu->iem.s.cLongJumps++;
13697 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13698#else
13699 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13700 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13701#endif
13702 if (rcStrict == VINF_SUCCESS)
13703 pVCpu->iem.s.cInstructions++;
13704 if (pVCpu->iem.s.cActiveMappings > 0)
13705 {
13706 Assert(rcStrict != VINF_SUCCESS);
13707 iemMemRollback(pVCpu);
13708 }
13709 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13710 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13711 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13712
13713//#ifdef DEBUG
13714// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13715//#endif
13716
13717 /* Execute the next instruction as well if a cli, pop ss or
13718 mov ss, Gr has just completed successfully. */
13719 if ( fExecuteInhibit
13720 && rcStrict == VINF_SUCCESS
13721 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13722 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13723 {
13724 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13725 if (rcStrict == VINF_SUCCESS)
13726 {
13727#ifdef LOG_ENABLED
13728 iemLogCurInstr(pVCpu, false, pszFunction);
13729#endif
13730#ifdef IEM_WITH_SETJMP
13731 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13732 if ((rcStrict = setjmp(JmpBuf)) == 0)
13733 {
13734 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13735 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13736 }
13737 else
13738 pVCpu->iem.s.cLongJumps++;
13739 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13740#else
13741 IEM_OPCODE_GET_NEXT_U8(&b);
13742 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13743#endif
13744 if (rcStrict == VINF_SUCCESS)
13745 pVCpu->iem.s.cInstructions++;
13746 if (pVCpu->iem.s.cActiveMappings > 0)
13747 {
13748 Assert(rcStrict != VINF_SUCCESS);
13749 iemMemRollback(pVCpu);
13750 }
13751 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13752 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13753 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13754 }
13755 else if (pVCpu->iem.s.cActiveMappings > 0)
13756 iemMemRollback(pVCpu);
13757 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13758 }
13759
13760 /*
13761 * Return value fiddling, statistics and sanity assertions.
13762 */
13763 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13764
13765 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
13766 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
13767 return rcStrict;
13768}
13769
13770
13771#ifdef IN_RC
13772/**
13773 * Re-enters raw-mode or ensure we return to ring-3.
13774 *
13775 * @returns rcStrict, maybe modified.
13776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13777 * @param rcStrict The status code returned by the interpreter.
13778 */
13779DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13780{
13781 if ( !pVCpu->iem.s.fInPatchCode
13782 && ( rcStrict == VINF_SUCCESS
13783 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13784 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13785 {
13786 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13787 CPUMRawEnter(pVCpu);
13788 else
13789 {
13790 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13791 rcStrict = VINF_EM_RESCHEDULE;
13792 }
13793 }
13794 return rcStrict;
13795}
13796#endif
13797
13798
13799/**
13800 * Execute one instruction.
13801 *
13802 * @return Strict VBox status code.
13803 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13804 */
13805VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13806{
13807#ifdef LOG_ENABLED
13808 iemLogCurInstr(pVCpu, true, "IEMExecOne");
13809#endif
13810
13811 /*
13812 * Do the decoding and emulation.
13813 */
13814 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13815 if (rcStrict == VINF_SUCCESS)
13816 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
13817 else if (pVCpu->iem.s.cActiveMappings > 0)
13818 iemMemRollback(pVCpu);
13819
13820#ifdef IN_RC
13821 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13822#endif
13823 if (rcStrict != VINF_SUCCESS)
13824 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13825 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13826 return rcStrict;
13827}
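#if 0 /* Hedged usage sketch, not part of the build: shows how a caller might single-step a
       * handful of guest instructions with IEMExecOne. The function name and the instruction
       * budget are made up for illustration. */
static VBOXSTRICTRC exampleIemSingleStep(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (unsigned cLeft = 8; cLeft > 0 && rcStrict == VINF_SUCCESS; cLeft--)
        rcStrict = IEMExecOne(pVCpu); /* decodes and executes exactly one instruction */
    return rcStrict;
}
#endif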
13828
13829
13830VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13831{
13832 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
13833
13834 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13835 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13836 if (rcStrict == VINF_SUCCESS)
13837 {
13838 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
13839 if (pcbWritten)
13840 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13841 }
13842 else if (pVCpu->iem.s.cActiveMappings > 0)
13843 iemMemRollback(pVCpu);
13844
13845#ifdef IN_RC
13846 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13847#endif
13848 return rcStrict;
13849}
13850
13851
13852VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13853 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13854{
13855 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
13856
13857 VBOXSTRICTRC rcStrict;
13858 if ( cbOpcodeBytes
13859 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
13860 {
13861 iemInitDecoder(pVCpu, false);
13862#ifdef IEM_WITH_CODE_TLB
13863 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13864 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13865 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13866 pVCpu->iem.s.offCurInstrStart = 0;
13867 pVCpu->iem.s.offInstrNextByte = 0;
13868#else
13869 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13870 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13871#endif
13872 rcStrict = VINF_SUCCESS;
13873 }
13874 else
13875 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13876 if (rcStrict == VINF_SUCCESS)
13877 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
13878 else if (pVCpu->iem.s.cActiveMappings > 0)
13879 iemMemRollback(pVCpu);
13880
13881#ifdef IN_RC
13882 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13883#endif
13884 return rcStrict;
13885}
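#if 0 /* Hedged usage sketch, not part of the build: feeds opcode bytes the caller already has
       * to IEMExecOneWithPrefetchedByPC so the decoder can skip re-reading guest memory. The
       * function name and the NOP opcode byte are made up for illustration. */
static VBOXSTRICTRC exampleIemExecPrefetched(PVMCPU pVCpu)
{
    uint8_t const abInstr[] = { 0x90 }; /* assumes a NOP sits at the current guest RIP */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
                                        pVCpu->cpum.GstCtx.rip, abInstr, sizeof(abInstr));
}
#endif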
13886
13887
13888VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13889{
13890 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
13891
13892 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13893 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13894 if (rcStrict == VINF_SUCCESS)
13895 {
13896 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
13897 if (pcbWritten)
13898 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13899 }
13900 else if (pVCpu->iem.s.cActiveMappings > 0)
13901 iemMemRollback(pVCpu);
13902
13903#ifdef IN_RC
13904 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13905#endif
13906 return rcStrict;
13907}
13908
13909
13910VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13911 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13912{
13913 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
13914
13915 VBOXSTRICTRC rcStrict;
13916 if ( cbOpcodeBytes
13917 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
13918 {
13919 iemInitDecoder(pVCpu, true);
13920#ifdef IEM_WITH_CODE_TLB
13921 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13922 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13923 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13924 pVCpu->iem.s.offCurInstrStart = 0;
13925 pVCpu->iem.s.offInstrNextByte = 0;
13926#else
13927 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13928 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13929#endif
13930 rcStrict = VINF_SUCCESS;
13931 }
13932 else
13933 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13934 if (rcStrict == VINF_SUCCESS)
13935 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
13936 else if (pVCpu->iem.s.cActiveMappings > 0)
13937 iemMemRollback(pVCpu);
13938
13939#ifdef IN_RC
13940 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13941#endif
13942 return rcStrict;
13943}
13944
13945
13946/**
13947 * For debugging DISGetParamSize; may come in handy.
13948 *
13949 * @returns Strict VBox status code.
13950 * @param pVCpu The cross context virtual CPU structure of the
13951 * calling EMT.
13952 * @param pCtxCore The context core structure.
13953 * @param OpcodeBytesPC The PC of the opcode bytes.
13954 * @param pvOpcodeBytes Prefetched opcode bytes.
13955 * @param cbOpcodeBytes Number of prefetched bytes.
13956 * @param pcbWritten Where to return the number of bytes written.
13957 * Optional.
13958 */
13959VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13960 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13961 uint32_t *pcbWritten)
13962{
13963 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
13964
13965 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13966 VBOXSTRICTRC rcStrict;
13967 if ( cbOpcodeBytes
13968 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
13969 {
13970 iemInitDecoder(pVCpu, true);
13971#ifdef IEM_WITH_CODE_TLB
13972 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13973 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13974 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13975 pVCpu->iem.s.offCurInstrStart = 0;
13976 pVCpu->iem.s.offInstrNextByte = 0;
13977#else
13978 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13979 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13980#endif
13981 rcStrict = VINF_SUCCESS;
13982 }
13983 else
13984 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13985 if (rcStrict == VINF_SUCCESS)
13986 {
13987 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
13988 if (pcbWritten)
13989 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13990 }
13991 else if (pVCpu->iem.s.cActiveMappings > 0)
13992 iemMemRollback(pVCpu);
13993
13994#ifdef IN_RC
13995 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13996#endif
13997 return rcStrict;
13998}
13999
14000
14001VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14002{
14003 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14004
14005 /*
14006 * See if there is an interrupt pending in TRPM, inject it if we can.
14007 */
14008 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14009#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14010 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14011 if (fIntrEnabled)
14012 {
14013 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14014 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14015 else
14016 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14017 }
14018#else
14019 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14020#endif
14021 if ( fIntrEnabled
14022 && TRPMHasTrap(pVCpu)
14023 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14024 {
14025 uint8_t u8TrapNo;
14026 TRPMEVENT enmType;
14027 RTGCUINT uErrCode;
14028 RTGCPTR uCr2;
14029 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14030 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14031 TRPMResetTrap(pVCpu);
14032 }
14033
14034 /*
14035 * Initial decoder init w/ prefetch, then setup setjmp.
14036 */
14037 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14038 if (rcStrict == VINF_SUCCESS)
14039 {
14040#ifdef IEM_WITH_SETJMP
14041 jmp_buf JmpBuf;
14042 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14043 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14044 pVCpu->iem.s.cActiveMappings = 0;
14045 if ((rcStrict = setjmp(JmpBuf)) == 0)
14046#endif
14047 {
14048 /*
14049 * The run loop. We limit ourselves to 4096 instructions right now.
14050 */
14051 PVM pVM = pVCpu->CTX_SUFF(pVM);
14052 uint32_t cInstr = 4096;
14053 for (;;)
14054 {
14055 /*
14056 * Log the state.
14057 */
14058#ifdef LOG_ENABLED
14059 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14060#endif
14061
14062 /*
14063 * Do the decoding and emulation.
14064 */
14065 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14066 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14068 {
14069 Assert(pVCpu->iem.s.cActiveMappings == 0);
14070 pVCpu->iem.s.cInstructions++;
14071 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14072 {
14073 uint32_t fCpu = pVCpu->fLocalForcedActions
14074 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14075 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14076 | VMCPU_FF_TLB_FLUSH
14077#ifdef VBOX_WITH_RAW_MODE
14078 | VMCPU_FF_TRPM_SYNC_IDT
14079 | VMCPU_FF_SELM_SYNC_TSS
14080 | VMCPU_FF_SELM_SYNC_GDT
14081 | VMCPU_FF_SELM_SYNC_LDT
14082#endif
14083 | VMCPU_FF_INHIBIT_INTERRUPTS
14084 | VMCPU_FF_BLOCK_NMIS
14085 | VMCPU_FF_UNHALT ));
14086
14087 if (RT_LIKELY( ( !fCpu
14088 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14089 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14090 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14091 {
14092 if (cInstr-- > 0)
14093 {
14094 Assert(pVCpu->iem.s.cActiveMappings == 0);
14095 iemReInitDecoder(pVCpu);
14096 continue;
14097 }
14098 }
14099 }
14100 Assert(pVCpu->iem.s.cActiveMappings == 0);
14101 }
14102 else if (pVCpu->iem.s.cActiveMappings > 0)
14103 iemMemRollback(pVCpu);
14104 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14105 break;
14106 }
14107 }
14108#ifdef IEM_WITH_SETJMP
14109 else
14110 {
14111 if (pVCpu->iem.s.cActiveMappings > 0)
14112 iemMemRollback(pVCpu);
14113 pVCpu->iem.s.cLongJumps++;
14114 }
14115 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14116#endif
14117
14118 /*
14119 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14120 */
14121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14123 }
14124 else
14125 {
14126 if (pVCpu->iem.s.cActiveMappings > 0)
14127 iemMemRollback(pVCpu);
14128
14129#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14130 /*
14131 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14132 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14133 */
14134 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14135#endif
14136 }
14137
14138 /*
14139 * Maybe re-enter raw-mode and log.
14140 */
14141#ifdef IN_RC
14142 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14143#endif
14144 if (rcStrict != VINF_SUCCESS)
14145 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14146 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14147 if (pcInstructions)
14148 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14149 return rcStrict;
14150}
14151
14152
14153/**
14154 * Interface used by EMExecuteExec; gathers exit statistics and enforces instruction limits.
14155 *
14156 * @returns Strict VBox status code.
14157 * @param pVCpu The cross context virtual CPU structure.
14158 * @param fWillExit To be defined.
14159 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14160 * @param cMaxInstructions Maximum number of instructions to execute.
14161 * @param cMaxInstructionsWithoutExits
14162 * The max number of instructions without exits.
14163 * @param pStats Where to return statistics.
14164 */
14165VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14166 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14167{
14168 NOREF(fWillExit); /** @todo define flexible exit crits */
14169
14170 /*
14171 * Initialize return stats.
14172 */
14173 pStats->cInstructions = 0;
14174 pStats->cExits = 0;
14175 pStats->cMaxExitDistance = 0;
14176 pStats->cReserved = 0;
14177
14178 /*
14179 * Initial decoder init w/ prefetch, then setup setjmp.
14180 */
14181 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14182 if (rcStrict == VINF_SUCCESS)
14183 {
14184#ifdef IEM_WITH_SETJMP
14185 jmp_buf JmpBuf;
14186 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14187 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14188 pVCpu->iem.s.cActiveMappings = 0;
14189 if ((rcStrict = setjmp(JmpBuf)) == 0)
14190#endif
14191 {
14192#ifdef IN_RING0
14193 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14194#endif
14195 uint32_t cInstructionSinceLastExit = 0;
14196
14197 /*
14198 * The run loop, bounded by the caller-specified instruction limits.
14199 */
14200 PVM pVM = pVCpu->CTX_SUFF(pVM);
14201 for (;;)
14202 {
14203 /*
14204 * Log the state.
14205 */
14206#ifdef LOG_ENABLED
14207 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14208#endif
14209
14210 /*
14211 * Do the decoding and emulation.
14212 */
14213 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14214
14215 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14216 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14217
14218 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14219 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14220 {
14221 pStats->cExits += 1;
14222 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14223 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14224 cInstructionSinceLastExit = 0;
14225 }
14226
14227 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14228 {
14229 Assert(pVCpu->iem.s.cActiveMappings == 0);
14230 pVCpu->iem.s.cInstructions++;
14231 pStats->cInstructions++;
14232 cInstructionSinceLastExit++;
14233 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14234 {
14235 uint32_t fCpu = pVCpu->fLocalForcedActions
14236 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14237 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14238 | VMCPU_FF_TLB_FLUSH
14239#ifdef VBOX_WITH_RAW_MODE
14240 | VMCPU_FF_TRPM_SYNC_IDT
14241 | VMCPU_FF_SELM_SYNC_TSS
14242 | VMCPU_FF_SELM_SYNC_GDT
14243 | VMCPU_FF_SELM_SYNC_LDT
14244#endif
14245 | VMCPU_FF_INHIBIT_INTERRUPTS
14246 | VMCPU_FF_BLOCK_NMIS
14247 | VMCPU_FF_UNHALT ));
14248
14249 if (RT_LIKELY( ( ( !fCpu
14250 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14251 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14252 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) )
14253 || pStats->cInstructions < cMinInstructions))
14254 {
14255 if (pStats->cInstructions < cMaxInstructions)
14256 {
14257 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14258 {
14259#ifdef IN_RING0
14260 if ( !fCheckPreemptionPending
14261 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14262#endif
14263 {
14264 Assert(pVCpu->iem.s.cActiveMappings == 0);
14265 iemReInitDecoder(pVCpu);
14266 continue;
14267 }
14268#ifdef IN_RING0
14269 rcStrict = VINF_EM_RAW_INTERRUPT;
14270 break;
14271#endif
14272 }
14273 }
14274 }
14275 Assert(!(fCpu & VMCPU_FF_IEM));
14276 }
14277 Assert(pVCpu->iem.s.cActiveMappings == 0);
14278 }
14279 else if (pVCpu->iem.s.cActiveMappings > 0)
14280 iemMemRollback(pVCpu);
14281 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14282 break;
14283 }
14284 }
14285#ifdef IEM_WITH_SETJMP
14286 else
14287 {
14288 if (pVCpu->iem.s.cActiveMappings > 0)
14289 iemMemRollback(pVCpu);
14290 pVCpu->iem.s.cLongJumps++;
14291 }
14292 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14293#endif
14294
14295 /*
14296 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14297 */
14298 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14299 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14300 }
14301 else
14302 {
14303 if (pVCpu->iem.s.cActiveMappings > 0)
14304 iemMemRollback(pVCpu);
14305
14306#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14307 /*
14308 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14309 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14310 */
14311 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14312#endif
14313 }
14314
14315 /*
14316 * Maybe re-enter raw-mode and log.
14317 */
14318#ifdef IN_RC
14319 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14320#endif
14321 if (rcStrict != VINF_SUCCESS)
14322 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14323 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14324 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14325 return rcStrict;
14326}
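#if 0 /* Hedged usage sketch, not part of the build: shows how a caller might bound execution
       * and inspect the returned statistics. The function name and the limits are made up. */
static VBOXSTRICTRC exampleIemExecForExits(PVMCPU pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 1 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/,
                                            2048 /*cMaxInstructionsWithoutExits*/, &Stats);
    Log(("example: %u instructions, %u exits, max exit distance %u\n",
         Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}
#endif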
14327
14328
14329/**
14330 * Injects a trap, fault, abort, software interrupt or external interrupt.
14331 *
14332 * The parameter list matches TRPMQueryTrapAll pretty closely.
14333 *
14334 * @returns Strict VBox status code.
14335 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14336 * @param u8TrapNo The trap number.
14337 * @param enmType What type is it (trap/fault/abort), software
14338 * interrupt or hardware interrupt.
14339 * @param uErrCode The error code if applicable.
14340 * @param uCr2 The CR2 value if applicable.
14341 * @param cbInstr The instruction length (only relevant for
14342 * software interrupts).
14343 */
14344VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14345 uint8_t cbInstr)
14346{
14347 iemInitDecoder(pVCpu, false);
14348#ifdef DBGFTRACE_ENABLED
14349 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14350 u8TrapNo, enmType, uErrCode, uCr2);
14351#endif
14352
14353 uint32_t fFlags;
14354 switch (enmType)
14355 {
14356 case TRPM_HARDWARE_INT:
14357 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14358 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14359 uErrCode = uCr2 = 0;
14360 break;
14361
14362 case TRPM_SOFTWARE_INT:
14363 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14364 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14365 uErrCode = uCr2 = 0;
14366 break;
14367
14368 case TRPM_TRAP:
14369 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14370 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14371 if (u8TrapNo == X86_XCPT_PF)
14372 fFlags |= IEM_XCPT_FLAGS_CR2;
14373 switch (u8TrapNo)
14374 {
14375 case X86_XCPT_DF:
14376 case X86_XCPT_TS:
14377 case X86_XCPT_NP:
14378 case X86_XCPT_SS:
14379 case X86_XCPT_PF:
14380 case X86_XCPT_AC:
14381 fFlags |= IEM_XCPT_FLAGS_ERR;
14382 break;
14383
14384 case X86_XCPT_NMI:
14385 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14386 break;
14387 }
14388 break;
14389
14390 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14391 }
14392
14393 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14394
14395 if (pVCpu->iem.s.cActiveMappings > 0)
14396 iemMemRollback(pVCpu);
14397
14398 return rcStrict;
14399}
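#if 0 /* Hedged usage sketch, not part of the build: injects a write #PF via IEMInjectTrap.
       * The function name, error code bits and fault address parameter are made up for
       * illustration. */
static VBOXSTRICTRC exampleIemInjectPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, X86_TRAP_PF_P | X86_TRAP_PF_RW,
                         GCPtrFault, 0 /*cbInstr - not a software interrupt*/);
}
#endif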
14400
14401
14402/**
14403 * Injects the active TRPM event.
14404 *
14405 * @returns Strict VBox status code.
14406 * @param pVCpu The cross context virtual CPU structure.
14407 */
14408VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14409{
14410#ifndef IEM_IMPLEMENTS_TASKSWITCH
14411 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14412#else
14413 uint8_t u8TrapNo;
14414 TRPMEVENT enmType;
14415 RTGCUINT uErrCode;
14416 RTGCUINTPTR uCr2;
14417 uint8_t cbInstr;
14418 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14419 if (RT_FAILURE(rc))
14420 return rc;
14421
14422 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14423# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14424 if (rcStrict == VINF_SVM_VMEXIT)
14425 rcStrict = VINF_SUCCESS;
14426# endif
14427
14428 /** @todo Are there any other codes that imply the event was successfully
14429 * delivered to the guest? See @bugref{6607}. */
14430 if ( rcStrict == VINF_SUCCESS
14431 || rcStrict == VINF_IEM_RAISED_XCPT)
14432 TRPMResetTrap(pVCpu);
14433
14434 return rcStrict;
14435#endif
14436}
14437
14438
14439VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14440{
14441 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14442 return VERR_NOT_IMPLEMENTED;
14443}
14444
14445
14446VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14447{
14448 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14449 return VERR_NOT_IMPLEMENTED;
14450}
14451
14452
14453#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14454/**
14455 * Executes an IRET instruction with the default operand size.
14456 *
14457 * This is for PATM.
14458 *
14459 * @returns VBox status code.
14460 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14461 * @param pCtxCore The register frame.
14462 */
14463VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14464{
14465 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14466
14467 iemCtxCoreToCtx(pCtx, pCtxCore);
14468 iemInitDecoder(pVCpu);
14469 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14470 if (rcStrict == VINF_SUCCESS)
14471 iemCtxToCtxCore(pCtxCore, pCtx);
14472 else
14473 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14474 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14475 return rcStrict;
14476}
14477#endif
14478
14479
14480/**
14481 * Macro used by the IEMExec* methods to check the given instruction length.
14482 *
14483 * Will return on failure!
14484 *
14485 * @param a_cbInstr The given instruction length.
14486 * @param a_cbMin The minimum length.
14487 */
14488#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14489 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14490 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14491
14492
14493/**
14494 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14495 *
14496 * iemRCRawMaybeReenter is only called in raw-mode, obviously.
14497 *
14498 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14500 * @param rcStrict The status code to fiddle.
14501 */
14502DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14503{
14504 iemUninitExec(pVCpu);
14505#ifdef IN_RC
14506 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14507#else
14508 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14509#endif
14510}
14511
14512
14513/**
14514 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14515 *
14516 * This API ASSUMES that the caller has already verified that the guest code is
14517 * allowed to access the I/O port. (The I/O port is in the DX register in the
14518 * guest state.)
14519 *
14520 * @returns Strict VBox status code.
14521 * @param pVCpu The cross context virtual CPU structure.
14522 * @param cbValue The size of the I/O port access (1, 2, or 4).
14523 * @param enmAddrMode The addressing mode.
14524 * @param fRepPrefix Indicates whether a repeat prefix is used
14525 * (doesn't matter which for this instruction).
14526 * @param cbInstr The instruction length in bytes.
14527 * @param iEffSeg The effective segment address.
14528 * @param fIoChecked Whether the access to the I/O port has been
14529 * checked or not. It's typically checked in the
14530 * HM scenario.
14531 */
14532VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14533 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14534{
14535 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14536 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14537
14538 /*
14539 * State init.
14540 */
14541 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14542
14543 /*
14544 * Switch orgy for getting to the right handler.
14545 */
14546 VBOXSTRICTRC rcStrict;
14547 if (fRepPrefix)
14548 {
14549 switch (enmAddrMode)
14550 {
14551 case IEMMODE_16BIT:
14552 switch (cbValue)
14553 {
14554 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14555 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14556 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14557 default:
14558 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14559 }
14560 break;
14561
14562 case IEMMODE_32BIT:
14563 switch (cbValue)
14564 {
14565 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14566 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14567 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14568 default:
14569 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14570 }
14571 break;
14572
14573 case IEMMODE_64BIT:
14574 switch (cbValue)
14575 {
14576 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14577 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14578 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14579 default:
14580 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14581 }
14582 break;
14583
14584 default:
14585 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14586 }
14587 }
14588 else
14589 {
14590 switch (enmAddrMode)
14591 {
14592 case IEMMODE_16BIT:
14593 switch (cbValue)
14594 {
14595 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14596 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14597 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14598 default:
14599 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14600 }
14601 break;
14602
14603 case IEMMODE_32BIT:
14604 switch (cbValue)
14605 {
14606 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14607 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14608 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14609 default:
14610 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14611 }
14612 break;
14613
14614 case IEMMODE_64BIT:
14615 switch (cbValue)
14616 {
14617 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14618 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14619 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14620 default:
14621 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14622 }
14623 break;
14624
14625 default:
14626 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14627 }
14628 }
14629
14630 if (pVCpu->iem.s.cActiveMappings)
14631 iemMemRollback(pVCpu);
14632
14633 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14634}
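#if 0 /* Hedged usage sketch, not part of the build: how an HM exit handler might forward a
       * "rep outsb" (0xf3 0x6e, two bytes) to IEMExecStringIoWrite. The function name, segment
       * and fIoChecked assumption are made up for illustration. */
static VBOXSTRICTRC exampleHmRepOutsb(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS, true /*fIoChecked*/);
}
#endif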
14635
14636
14637/**
14638 * Interface for HM and EM for executing string I/O IN (read) instructions.
14639 *
14640 * This API ASSUMES that the caller has already verified that the guest code is
14641 * allowed to access the I/O port. (The I/O port is in the DX register in the
14642 * guest state.)
14643 *
14644 * @returns Strict VBox status code.
14645 * @param pVCpu The cross context virtual CPU structure.
14646 * @param cbValue The size of the I/O port access (1, 2, or 4).
14647 * @param enmAddrMode The addressing mode.
14648 * @param fRepPrefix Indicates whether a repeat prefix is used
14649 * (doesn't matter which for this instruction).
14650 * @param cbInstr The instruction length in bytes.
14651 * @param fIoChecked Whether the access to the I/O port has been
14652 * checked or not. It's typically checked in the
14653 * HM scenario.
14654 */
14655VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14656 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14657{
14658 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14659
14660 /*
14661 * State init.
14662 */
14663 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14664
14665 /*
14666 * Switch orgy for getting to the right handler.
14667 */
14668 VBOXSTRICTRC rcStrict;
14669 if (fRepPrefix)
14670 {
14671 switch (enmAddrMode)
14672 {
14673 case IEMMODE_16BIT:
14674 switch (cbValue)
14675 {
14676 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14677 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14678 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14679 default:
14680 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14681 }
14682 break;
14683
14684 case IEMMODE_32BIT:
14685 switch (cbValue)
14686 {
14687 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14688 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14689 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14690 default:
14691 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14692 }
14693 break;
14694
14695 case IEMMODE_64BIT:
14696 switch (cbValue)
14697 {
14698 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14699 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14700 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14701 default:
14702 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14703 }
14704 break;
14705
14706 default:
14707 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14708 }
14709 }
14710 else
14711 {
14712 switch (enmAddrMode)
14713 {
14714 case IEMMODE_16BIT:
14715 switch (cbValue)
14716 {
14717 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14718 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14719 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14720 default:
14721 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14722 }
14723 break;
14724
14725 case IEMMODE_32BIT:
14726 switch (cbValue)
14727 {
14728 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14729 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14730 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14731 default:
14732 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14733 }
14734 break;
14735
14736 case IEMMODE_64BIT:
14737 switch (cbValue)
14738 {
14739 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14740 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14741 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14742 default:
14743 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14744 }
14745 break;
14746
14747 default:
14748 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14749 }
14750 }
14751
14752 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
14753 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14754}
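
/*
 * Usage sketch only: roughly how an HM exit handler might forward a decoded
 * REP INS to this API.  The variables cbValue, enmAddrMode and cbInstr, and
 * the VINF_IEM_RAISED_XCPT handling, are assumptions for the illustration and
 * not taken from any particular caller.
 *
 *      // fRepPrefix = true, fIoChecked = true (HM already checked the I/O permission bitmap).
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode,
 *                                                  true, cbInstr, true);
 *      if (rcStrict == VINF_IEM_RAISED_XCPT)
 *          rcStrict = VINF_SUCCESS;    // IEM has already raised the exception in the guest.
 */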
14755
14756
14757/**
14758 * Interface for rawmode to execute an OUT instruction.
14759 *
14760 * @returns Strict VBox status code.
14761 * @param pVCpu The cross context virtual CPU structure.
14762 * @param cbInstr The instruction length in bytes.
14763 * @param u16Port The port to write to.
14764 * @param cbReg The register size.
14765 *
14766 * @remarks In ring-0 not all of the state needs to be synced in.
14767 */
14768VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14769{
14770 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14771 Assert(cbReg <= 4 && cbReg != 3);
14772
14773 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14774 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14775 Assert(!pVCpu->iem.s.cActiveMappings);
14776 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14777}
14778
14779
14780/**
14781 * Interface for rawmode to execute an IN instruction.
14782 *
14783 * @returns Strict VBox status code.
14784 * @param pVCpu The cross context virtual CPU structure.
14785 * @param cbInstr The instruction length in bytes.
14786 * @param u16Port The port to read.
14787 * @param cbReg The register size.
14788 */
14789VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14790{
14791 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14792 Assert(cbReg <= 4 && cbReg != 3);
14793
14794 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14795 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14796 Assert(!pVCpu->iem.s.cActiveMappings);
14797 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14798}
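
/*
 * Usage sketch only: a raw-mode or HM I/O exit handler would typically pick
 * the direction and forward the decoded port access like this.  The fWrite,
 * u16Port, cbReg and cbInstr variables are assumed to come from the exit
 * decoding done by the caller.
 *
 *      VBOXSTRICTRC rcStrict = fWrite
 *                            ? IEMExecDecodedOut(pVCpu, cbInstr, u16Port, cbReg)
 *                            : IEMExecDecodedIn( pVCpu, cbInstr, u16Port, cbReg);
 */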
14799
14800
14801/**
14802 * Interface for HM and EM to write to a CRx register.
14803 *
14804 * @returns Strict VBox status code.
14805 * @param pVCpu The cross context virtual CPU structure.
14806 * @param cbInstr The instruction length in bytes.
14807 * @param iCrReg The control register number (destination).
14808 * @param iGReg The general purpose register number (source).
14809 *
14810 * @remarks In ring-0 not all of the state needs to be synced in.
14811 */
14812VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14813{
14814 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14815 Assert(iCrReg < 16);
14816 Assert(iGReg < 16);
14817
14818 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14819 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14820 Assert(!pVCpu->iem.s.cActiveMappings);
14821 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14822}
14823
14824
14825/**
14826 * Interface for HM and EM to read from a CRx register.
14827 *
14828 * @returns Strict VBox status code.
14829 * @param pVCpu The cross context virtual CPU structure.
14830 * @param cbInstr The instruction length in bytes.
14831 * @param iGReg The general purpose register number (destination).
14832 * @param iCrReg The control register number (source).
14833 *
14834 * @remarks In ring-0 not all of the state needs to be synced in.
14835 */
14836VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14837{
14838 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14839 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
14840 | CPUMCTX_EXTRN_APIC_TPR);
14841 Assert(iCrReg < 16);
14842 Assert(iGReg < 16);
14843
14844 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14845 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14846 Assert(!pVCpu->iem.s.cActiveMappings);
14847 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14848}
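
/*
 * Usage sketch only: note that the write and read interfaces take the CRx and
 * GPR indexes in destination-first order, so the arguments swap places with
 * the direction.  The fWrite, iCrReg, iGReg and cbInstr variables are
 * assumptions standing in for whatever the caller decoded from the exit info.
 *
 *      VBOXSTRICTRC rcStrict = fWrite
 *                            ? IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg)
 *                            : IEMExecDecodedMovCRxRead( pVCpu, cbInstr, iGReg, iCrReg);
 */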
14849
14850
14851/**
14852 * Interface for HM and EM to clear the CR0[TS] bit.
14853 *
14854 * @returns Strict VBox status code.
14855 * @param pVCpu The cross context virtual CPU structure.
14856 * @param cbInstr The instruction length in bytes.
14857 *
14858 * @remarks In ring-0 not all of the state needs to be synced in.
14859 */
14860VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14861{
14862 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14863
14864 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14865 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14866 Assert(!pVCpu->iem.s.cActiveMappings);
14867 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14868}
14869
14870
14871/**
14872 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14873 *
14874 * @returns Strict VBox status code.
14875 * @param pVCpu The cross context virtual CPU structure.
14876 * @param cbInstr The instruction length in bytes.
14877 * @param uValue The 16-bit machine status word to load into CR0 (only the low four bits are loaded).
14878 *
14879 * @remarks In ring-0 not all of the state needs to be synced in.
14880 */
14881VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14882{
14883 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14884
14885 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14886 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14887 Assert(!pVCpu->iem.s.cActiveMappings);
14888 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14889}
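
/*
 * Usage sketch only: a CR0 access intercept decoded as CLTS or LMSW could be
 * forwarded like this.  fIsClts and uMsw are assumptions for the illustration;
 * uMsw is the 16-bit LMSW source operand, of which only the low four bits end
 * up in CR0.
 *
 *      VBOXSTRICTRC rcStrict = fIsClts
 *                            ? IEMExecDecodedClts(pVCpu, cbInstr)
 *                            : IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw);
 */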
14890
14891
14892/**
14893 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14894 *
14895 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14896 *
14897 * @returns Strict VBox status code.
14898 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14899 * @param cbInstr The instruction length in bytes.
14900 * @remarks In ring-0 not all of the state needs to be synced in.
14901 * @thread EMT(pVCpu)
14902 */
14903VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14904{
14905 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14906
14907 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14908 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14909 Assert(!pVCpu->iem.s.cActiveMappings);
14910 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14911}
14912
14913
14914/**
14915 * Interface for HM and EM to emulate the WBINVD instruction.
14916 *
14917 * @returns Strict VBox status code.
14918 * @param pVCpu The cross context virtual CPU structure.
14919 * @param cbInstr The instruction length in bytes.
14920 *
14921 * @remarks In ring-0 not all of the state needs to be synced in.
14922 */
14923VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
14924{
14925 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14926
14927 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14928 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
14929 Assert(!pVCpu->iem.s.cActiveMappings);
14930 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14931}
14932
14933
14934/**
14935 * Interface for HM and EM to emulate the INVD instruction.
14936 *
14937 * @returns Strict VBox status code.
14938 * @param pVCpu The cross context virtual CPU structure.
14939 * @param cbInstr The instruction length in bytes.
14940 *
14941 * @remarks In ring-0 not all of the state needs to be synced in.
14942 */
14943VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
14944{
14945 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14946
14947 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14948 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
14949 Assert(!pVCpu->iem.s.cActiveMappings);
14950 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14951}
14952
14953
14954/**
14955 * Interface for HM and EM to emulate the INVLPG instruction.
14956 *
14957 * @returns Strict VBox status code.
14958 * @retval VINF_PGM_SYNC_CR3
14959 *
14960 * @param pVCpu The cross context virtual CPU structure.
14961 * @param cbInstr The instruction length in bytes.
14962 * @param GCPtrPage The effective address of the page to invalidate.
14963 *
14964 * @remarks In ring-0 not all of the state needs to be synced in.
14965 */
14966VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
14967{
14968 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14969
14970 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14971 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
14972 Assert(!pVCpu->iem.s.cActiveMappings);
14973 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14974}
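
/*
 * Usage sketch only: GCPtrPage is assumed to be the linear address decoded
 * from the intercepted INVLPG.  When shadow paging is active the call may
 * return VINF_PGM_SYNC_CR3, which the caller simply propagates so PGM gets to
 * resync before the guest is resumed.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
 *      return rcStrict;    // VINF_SUCCESS, VINF_PGM_SYNC_CR3 or another strict status.
 */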
14975
14976
14977/**
14978 * Interface for HM and EM to emulate the INVPCID instruction.
14979 *
 * @returns Strict VBox status code.
14980 * @param pVCpu The cross context virtual CPU structure.
14981 * @param cbInstr The instruction length in bytes.
14982 * @param uType The invalidation type.
14983 * @param GCPtrInvpcidDesc The effective address of the INVPCID descriptor.
14984 *
14985 * @remarks In ring-0 not all of the state needs to be synced in.
14986 */
14987VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
14988{
14989 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
14990
14991 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14992 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
14993 Assert(!pVCpu->iem.s.cActiveMappings);
14994 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14995}
14996
14997
14998
14999/**
15000 * Interface for HM and EM to emulate the CPUID instruction.
15001 *
15002 * @returns Strict VBox status code.
15003 *
15004 * @param pVCpu The cross context virtual CPU structure.
15005 * @param cbInstr The instruction length in bytes.
15006 *
15007 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
15008 */
15009VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15010{
15011 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15012 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15013
15014 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15015 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15016 Assert(!pVCpu->iem.s.cActiveMappings);
15017 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15018}
15019
15020
15021/**
15022 * Interface for HM and EM to emulate the RDPMC instruction.
15023 *
15024 * @returns Strict VBox status code.
15025 *
15026 * @param pVCpu The cross context virtual CPU structure.
15027 * @param cbInstr The instruction length in bytes.
15028 *
15029 * @remarks Not all of the state needs to be synced in.
15030 */
15031VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15032{
15033 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15034 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15035
15036 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15037 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15038 Assert(!pVCpu->iem.s.cActiveMappings);
15039 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15040}
15041
15042
15043/**
15044 * Interface for HM and EM to emulate the RDTSC instruction.
15045 *
15046 * @returns Strict VBox status code.
15047 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15048 *
15049 * @param pVCpu The cross context virtual CPU structure.
15050 * @param cbInstr The instruction length in bytes.
15051 *
15052 * @remarks Not all of the state needs to be synced in.
15053 */
15054VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15055{
15056 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15057 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15058
15059 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15060 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15061 Assert(!pVCpu->iem.s.cActiveMappings);
15062 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15063}
15064
15065
15066/**
15067 * Interface for HM and EM to emulate the RDTSCP instruction.
15068 *
15069 * @returns Strict VBox status code.
15070 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15071 *
15072 * @param pVCpu The cross context virtual CPU structure.
15073 * @param cbInstr The instruction length in bytes.
15074 *
15075 * @remarks Not all of the state needs to be synced in. Recommended
15076 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15077 */
15078VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15079{
15080 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15081 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15082
15083 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15084 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15085 Assert(!pVCpu->iem.s.cActiveMappings);
15086 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15087}
15088
15089
15090/**
15091 * Interface for HM and EM to emulate the RDMSR instruction.
15092 *
15093 * @returns Strict VBox status code.
15094 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15095 *
15096 * @param pVCpu The cross context virtual CPU structure.
15097 * @param cbInstr The instruction length in bytes.
15098 *
15099 * @remarks Not all of the state needs to be synced in. Requires RCX and
15100 * (currently) all MSRs.
15101 */
15102VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15103{
15104 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15105 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15106
15107 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15108 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15109 Assert(!pVCpu->iem.s.cActiveMappings);
15110 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15111}
15112
15113
15114/**
15115 * Interface for HM and EM to emulate the WRMSR instruction.
15116 *
15117 * @returns Strict VBox status code.
15118 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15119 *
15120 * @param pVCpu The cross context virtual CPU structure.
15121 * @param cbInstr The instruction length in bytes.
15122 *
15123 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15124 * and (currently) all MSRs.
15125 */
15126VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15127{
15128 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15129 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15130 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15131
15132 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15133 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15134 Assert(!pVCpu->iem.s.cActiveMappings);
15135 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15136}
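
/*
 * Usage sketch only: the MSR index is taken from RCX in the guest context and
 * the WRMSR value from RDX:RAX, so a caller only passes the instruction length
 * and direction.  fWrite and cbInstr are assumptions for the illustration.
 *
 *      VBOXSTRICTRC rcStrict = fWrite
 *                            ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
 *                            : IEMExecDecodedRdmsr(pVCpu, cbInstr);
 */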
15137
15138
15139/**
15140 * Interface for HM and EM to emulate the MONITOR instruction.
15141 *
15142 * @returns Strict VBox status code.
15143 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15144 *
15145 * @param pVCpu The cross context virtual CPU structure.
15146 * @param cbInstr The instruction length in bytes.
15147 *
15148 * @remarks Not all of the state needs to be synced in.
15149 * @remarks ASSUMES the default DS segment is used and that no segment
15150 *          override prefixes are present.
15151 */
15152VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15153{
15154 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15155 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15156
15157 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15158 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15159 Assert(!pVCpu->iem.s.cActiveMappings);
15160 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15161}
15162
15163
15164/**
15165 * Interface for HM and EM to emulate the MWAIT instruction.
15166 *
15167 * @returns Strict VBox status code.
15168 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15169 *
15170 * @param pVCpu The cross context virtual CPU structure.
15171 * @param cbInstr The instruction length in bytes.
15172 *
15173 * @remarks Not all of the state needs to be synced in.
15174 */
15175VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15176{
15177 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15178
15179 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15180 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15181 Assert(!pVCpu->iem.s.cActiveMappings);
15182 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15183}
15184
15185
15186/**
15187 * Interface for HM and EM to emulate the HLT instruction.
15188 *
15189 * @returns Strict VBox status code.
15190 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15191 *
15192 * @param pVCpu The cross context virtual CPU structure.
15193 * @param cbInstr The instruction length in bytes.
15194 *
15195 * @remarks Not all of the state needs to be synced in.
15196 */
15197VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15198{
15199 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15200
15201 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15202 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15203 Assert(!pVCpu->iem.s.cActiveMappings);
15204 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15205}
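
/*
 * Usage sketch only: the interesting outcome of forwarding a HLT intercept is
 * a VINF_EM_HALT style status, which the caller is assumed to hand back to the
 * EM loop so the EMT can actually be halted until the next wakeup event.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedHlt(pVCpu, cbInstr);
 *      return rcStrict;    // Typically VINF_EM_HALT or VINF_IEM_RAISED_XCPT.
 */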
15206
15207
15208/**
15209 * Checks if IEM is in the process of delivering an event (interrupt or
15210 * exception).
15211 *
15212 * @returns true if we're in the process of raising an interrupt or exception,
15213 * false otherwise.
15214 * @param pVCpu The cross context virtual CPU structure.
15215 * @param puVector Where to store the vector associated with the
15216 * currently delivered event, optional.
15217 * @param pfFlags Where to store the event delivery flags (see
15218 * IEM_XCPT_FLAGS_XXX), optional.
15219 * @param puErr Where to store the error code associated with the
15220 * event, optional.
15221 * @param puCr2 Where to store the CR2 associated with the event,
15222 * optional.
15223 * @remarks The caller should check the flags to determine if the error code and
15224 * CR2 are valid for the event.
15225 */
15226VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15227{
15228 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15229 if (fRaisingXcpt)
15230 {
15231 if (puVector)
15232 *puVector = pVCpu->iem.s.uCurXcpt;
15233 if (pfFlags)
15234 *pfFlags = pVCpu->iem.s.fCurXcpt;
15235 if (puErr)
15236 *puErr = pVCpu->iem.s.uCurXcptErr;
15237 if (puCr2)
15238 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15239 }
15240 return fRaisingXcpt;
15241}
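
/*
 * Usage sketch only: querying the event currently being delivered, e.g. from
 * an HM exit handler that needs to know whether it interrupted an event
 * injection.  The local variable names are illustrative.
 *
 *      uint8_t  uVector;
 *      uint32_t fFlags, uErrCode;
 *      uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, &uCr2))
 *      {
 *          // An event is in flight; consult fFlags (IEM_XCPT_FLAGS_XXX) before
 *          // trusting uErrCode and uCr2.
 *      }
 */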
15242
15243#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15244
15245/**
15246 * Interface for HM and EM to emulate the CLGI instruction.
15247 *
15248 * @returns Strict VBox status code.
15249 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15250 * @param cbInstr The instruction length in bytes.
15251 * @thread EMT(pVCpu)
15252 */
15253VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15254{
15255 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15256
15257 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15258 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15259 Assert(!pVCpu->iem.s.cActiveMappings);
15260 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15261}
15262
15263
15264/**
15265 * Interface for HM and EM to emulate the STGI instruction.
15266 *
15267 * @returns Strict VBox status code.
15268 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15269 * @param cbInstr The instruction length in bytes.
15270 * @thread EMT(pVCpu)
15271 */
15272VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15273{
15274 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15275
15276 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15277 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15278 Assert(!pVCpu->iem.s.cActiveMappings);
15279 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15280}
15281
15282
15283/**
15284 * Interface for HM and EM to emulate the VMLOAD instruction.
15285 *
15286 * @returns Strict VBox status code.
15287 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15288 * @param cbInstr The instruction length in bytes.
15289 * @thread EMT(pVCpu)
15290 */
15291VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15292{
15293 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15294
15295 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15296 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15297 Assert(!pVCpu->iem.s.cActiveMappings);
15298 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15299}
15300
15301
15302/**
15303 * Interface for HM and EM to emulate the VMSAVE instruction.
15304 *
15305 * @returns Strict VBox status code.
15306 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15307 * @param cbInstr The instruction length in bytes.
15308 * @thread EMT(pVCpu)
15309 */
15310VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15311{
15312 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15313
15314 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15315 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15316 Assert(!pVCpu->iem.s.cActiveMappings);
15317 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15318}
15319
15320
15321/**
15322 * Interface for HM and EM to emulate the INVLPGA instruction.
15323 *
15324 * @returns Strict VBox status code.
15325 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15326 * @param cbInstr The instruction length in bytes.
15327 * @thread EMT(pVCpu)
15328 */
15329VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15330{
15331 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15332
15333 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15334 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15335 Assert(!pVCpu->iem.s.cActiveMappings);
15336 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15337}
15338
15339
15340/**
15341 * Interface for HM and EM to emulate the VMRUN instruction.
15342 *
15343 * @returns Strict VBox status code.
15344 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15345 * @param cbInstr The instruction length in bytes.
15346 * @thread EMT(pVCpu)
15347 */
15348VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15349{
15350 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15351 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15352
15353 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15354 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15355 Assert(!pVCpu->iem.s.cActiveMappings);
15356 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15357}
15358
15359
15360/**
15361 * Interface for HM and EM to emulate \#VMEXIT.
15362 *
15363 * @returns Strict VBox status code.
15364 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15365 * @param uExitCode The exit code.
15366 * @param uExitInfo1 The exit info. 1 field.
15367 * @param uExitInfo2 The exit info. 2 field.
15368 * @thread EMT(pVCpu)
15369 */
15370VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15371{
15372 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15373 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15374 if (pVCpu->iem.s.cActiveMappings)
15375 iemMemRollback(pVCpu);
15376 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15377}
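
/*
 * Usage sketch only: nested SVM code in HM would use this to force a #VMEXIT
 * on behalf of the nested guest.  The uExitCode/uExitInfo1/uExitInfo2 values
 * are assumed to be the SVM exit code and exit information fields the caller
 * wants reflected into the nested-guest VMCB.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
 */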
15378
15379#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15380#ifdef IN_RING3
15381
15382/**
15383 * Handles the unlikely and probably fatal merge cases.
15384 *
15385 * @returns Merged status code.
15386 * @param rcStrict Current EM status code.
15387 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15388 * with @a rcStrict.
15389 * @param iMemMap The memory mapping index. For error reporting only.
15390 * @param pVCpu The cross context virtual CPU structure of the calling
15391 * thread, for error reporting only.
15392 */
15393DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15394 unsigned iMemMap, PVMCPU pVCpu)
15395{
15396 if (RT_FAILURE_NP(rcStrict))
15397 return rcStrict;
15398
15399 if (RT_FAILURE_NP(rcStrictCommit))
15400 return rcStrictCommit;
15401
15402 if (rcStrict == rcStrictCommit)
15403 return rcStrictCommit;
15404
15405 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15406 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15407 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15408 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15409 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15410 return VERR_IOM_FF_STATUS_IPE;
15411}
15412
15413
15414/**
15415 * Helper for IEMR3ProcessForceFlag.
15416 *
15417 * @returns Merged status code.
15418 * @param rcStrict Current EM status code.
15419 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15420 * with @a rcStrict.
15421 * @param iMemMap The memory mapping index. For error reporting only.
15422 * @param pVCpu The cross context virtual CPU structure of the calling
15423 * thread, for error reporting only.
15424 */
15425DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15426{
15427 /* Simple. */
15428 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15429 return rcStrictCommit;
15430
15431 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15432 return rcStrict;
15433
15434 /* EM scheduling status codes. */
15435 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15436 && rcStrict <= VINF_EM_LAST))
15437 {
15438 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15439 && rcStrictCommit <= VINF_EM_LAST))
15440 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15441 }
15442
15443 /* Unlikely */
15444 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15445}
15446
15447
15448/**
15449 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15450 *
15451 * @returns Merge between @a rcStrict and what the commit operation returned.
15452 * @param pVM The cross context VM structure.
15453 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15454 * @param rcStrict The status code returned by ring-0 or raw-mode.
15455 */
15456VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15457{
15458 /*
15459 * Reset the pending commit.
15460 */
15461 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15462 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15463 ("%#x %#x %#x\n",
15464 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15465 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15466
15467 /*
15468 * Commit the pending bounce buffers (usually just one).
15469 */
15470 unsigned cBufs = 0;
15471 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15472 while (iMemMap-- > 0)
15473 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15474 {
15475 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15476 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15477 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15478
15479 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15480 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15481 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15482
15483 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15484 {
15485 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15486 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15487 pbBuf,
15488 cbFirst,
15489 PGMACCESSORIGIN_IEM);
15490 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15491 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15492 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15493 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15494 }
15495
15496 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15497 {
15498 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15499 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15500 pbBuf + cbFirst,
15501 cbSecond,
15502 PGMACCESSORIGIN_IEM);
15503 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15504 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15505 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15506 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15507 }
15508 cBufs++;
15509 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15510 }
15511
15512 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15513 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15514 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15515 pVCpu->iem.s.cActiveMappings = 0;
15516 return rcStrict;
15517}
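
/*
 * Usage sketch only: roughly how the ring-3 force-flag processing is expected
 * to pick this up; the surrounding loop and the origin of rcStrict are
 * assumptions for the illustration.
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */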
15518
15519#endif /* IN_RING3 */
15520