VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@72549

Last change on this file since 72549 was 72513, checked in by vboxsync, 7 years ago

IEM: Made str use CImpl to better facilitate SVM intercepts.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 596.8 KB
1/* $Id: IEMAll.cpp 72513 2018-06-11 14:20:47Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
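/*
 * Illustrative example (not part of the original file): a Level 4 statement
 * emitting a decoded mnemonic together with the current instruction pointer
 * might look like the hypothetical line below; it only produces output when
 * level 4 of the IEM log group is enabled.
 *
 *     Log4(("decode - %04x:%08RX64 mov Gv,Ev\n",
 *           pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
 */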
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL.
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL_1.
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
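/*
 * Illustrative sketch (not part of the original file): with the macros above,
 * a decoder function for a not-yet-implemented opcode could be defined as
 * follows; the name iemOp_Example is hypothetical.
 *
 *     FNIEMOP_DEF(iemOp_Example)
 *     {
 *         IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 *     }
 */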
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211/**
212 * CPU exception classes.
213 */
214typedef enum IEMXCPTCLASS
215{
216 IEMXCPTCLASS_BENIGN,
217 IEMXCPTCLASS_CONTRIBUTORY,
218 IEMXCPTCLASS_PAGE_FAULT,
219 IEMXCPTCLASS_DOUBLE_FAULT
220} IEMXCPTCLASS;
221
222
223/*********************************************************************************************************************************
224* Defined Constants And Macros *
225*********************************************************************************************************************************/
226/** @def IEM_WITH_SETJMP
227 * Enables alternative status code handling using setjmps.
228 *
229 * This adds a bit of expense via the setjmp() call since it saves all the
230 * non-volatile registers. However, it eliminates return code checks and allows
231 * for more optimal return value passing (return regs instead of stack buffer).
232 */
233#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
234# define IEM_WITH_SETJMP
235#endif
236
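/*
 * Illustrative sketch (not part of the original file) of the general pattern
 * IEM_WITH_SETJMP enables: an outer dispatcher establishes a jump buffer once,
 * and deep helpers report failures via longjmp instead of threading a
 * VBOXSTRICTRC back up through every caller. All names below are hypothetical.
 *
 *     jmp_buf      JmpBuf;
 *     VBOXSTRICTRC rcStrict;
 *     int rcJmp = setjmp(JmpBuf);
 *     if (rcJmp == 0)
 *         rcStrict = FNIEMOP_CALL(pfnOp);   // helpers may longjmp(JmpBuf, rc)
 *     else
 *         rcStrict = rcJmp;                 // error path taken via longjmp
 */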
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
243
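/*
 * Illustrative example (not part of the original file): typical use in a
 * switch over an enum where every valid value is handled explicitly.
 *
 *     switch (pVCpu->iem.s.enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: ...; break;
 *         case IEMMODE_32BIT: ...; break;
 *         case IEMMODE_64BIT: ...; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */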
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
300
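/*
 * Illustrative example (not part of the original file): a one-byte opcode
 * dispatcher would typically invoke the decoder function fetched from a map
 * such as g_apfnOneByteMap through these wrappers, e.g.
 *
 *     return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 *
 * where b is the opcode byte just fetched (hypothetical local name).
 */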
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in real mode.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
335 * @returns PCCPUMFEATURES
336 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
337 */
338#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
339
340/**
341 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
342 * @returns PCCPUMFEATURES
343 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
344 */
345#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
346
347/**
348 * Evaluates to true if we're presenting an Intel CPU to the guest.
349 */
350#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
351
352/**
353 * Evaluates to true if we're presenting an AMD CPU to the guest.
354 */
355#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
356
357/**
358 * Check if the address is canonical.
359 */
360#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
361
362/**
363 * Gets the effective VEX.VVVV value.
364 *
365 * The 4th bit is ignored when not in 64-bit code.
366 * @returns effective V-register value.
367 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
368 */
369#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
370 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
371
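/*
 * Worked example (illustrative, not part of the original file): outside
 * 64-bit mode only the low three bits are used, so a decoded uVex3rdReg value
 * of 0xA (binary 1010) yields an effective VVVV of 2, while the same value in
 * 64-bit mode is used as-is and selects register 10.
 */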
372/** @def IEM_USE_UNALIGNED_DATA_ACCESS
373 * Use unaligned accesses instead of elaborate byte assembly. */
374#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
375# define IEM_USE_UNALIGNED_DATA_ACCESS
376#endif
377
378#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
379/**
380 * Check the common SVM instruction preconditions.
381 */
382# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
383 do { \
384 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
385 { \
386 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
387 return iemRaiseUndefinedOpcode(pVCpu); \
388 } \
389 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
390 { \
391 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
392 return iemRaiseUndefinedOpcode(pVCpu); \
393 } \
394 if (pVCpu->iem.s.uCpl != 0) \
395 { \
396 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
397 return iemRaiseGeneralProtectionFault0(pVCpu); \
398 } \
399 } while (0)
400
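/*
 * Illustrative example (not part of the original file): an SVM instruction
 * implementation (e.g. a hypothetical iemCImpl_vmload) would typically start
 * with
 *
 *     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
 *
 * so that EFER.SVME, real/v8086 mode and CPL are validated before any other
 * work is done.
 */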
401/**
402 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
403 */
404# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
405 do { \
406 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
407 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
408 } while (0)
409
410/**
411 * Check if SVM is enabled.
412 */
413# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
414
415/**
416 * Check if an SVM control/instruction intercept is set.
417 */
418# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
419
420/**
421 * Check if an SVM read CRx intercept is set.
422 */
423# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
424
425/**
426 * Check if an SVM write CRx intercept is set.
427 */
428# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
429
430/**
431 * Check if an SVM read DRx intercept is set.
432 */
433# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
434
435/**
436 * Check if an SVM write DRx intercept is set.
437 */
438# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
439
440/**
441 * Check if an SVM exception intercept is set.
442 */
443# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
444
445/**
446 * Get the SVM pause-filter count.
447 */
448# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
449
450/**
451 * Invokes the SVM \#VMEXIT handler for the nested-guest.
452 */
453# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
454 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
455
456/**
457 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
458 * corresponding decode assist information.
459 */
460# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
461 do \
462 { \
463 uint64_t uExitInfo1; \
464 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
465 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
466 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
467 else \
468 uExitInfo1 = 0; \
469 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
470 } while (0)
471
472#else
473# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
474# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
475# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
476# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
477# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
478# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
479# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
480# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
481# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
482# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (0)
483# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
484# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
485
486#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
487
488
489/*********************************************************************************************************************************
490* Global Variables *
491*********************************************************************************************************************************/
492extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
493
494
495/** Function table for the ADD instruction. */
496IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
497{
498 iemAImpl_add_u8, iemAImpl_add_u8_locked,
499 iemAImpl_add_u16, iemAImpl_add_u16_locked,
500 iemAImpl_add_u32, iemAImpl_add_u32_locked,
501 iemAImpl_add_u64, iemAImpl_add_u64_locked
502};
503
504/** Function table for the ADC instruction. */
505IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
506{
507 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
508 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
509 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
510 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
511};
512
513/** Function table for the SUB instruction. */
514IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
515{
516 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
517 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
518 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
519 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
520};
521
522/** Function table for the SBB instruction. */
523IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
524{
525 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
526 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
527 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
528 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
529};
530
531/** Function table for the OR instruction. */
532IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
533{
534 iemAImpl_or_u8, iemAImpl_or_u8_locked,
535 iemAImpl_or_u16, iemAImpl_or_u16_locked,
536 iemAImpl_or_u32, iemAImpl_or_u32_locked,
537 iemAImpl_or_u64, iemAImpl_or_u64_locked
538};
539
540/** Function table for the XOR instruction. */
541IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
542{
543 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
544 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
545 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
546 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
547};
548
549/** Function table for the AND instruction. */
550IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
551{
552 iemAImpl_and_u8, iemAImpl_and_u8_locked,
553 iemAImpl_and_u16, iemAImpl_and_u16_locked,
554 iemAImpl_and_u32, iemAImpl_and_u32_locked,
555 iemAImpl_and_u64, iemAImpl_and_u64_locked
556};
557
558/** Function table for the CMP instruction.
559 * @remarks Making operand order ASSUMPTIONS.
560 */
561IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
562{
563 iemAImpl_cmp_u8, NULL,
564 iemAImpl_cmp_u16, NULL,
565 iemAImpl_cmp_u32, NULL,
566 iemAImpl_cmp_u64, NULL
567};
568
569/** Function table for the TEST instruction.
570 * @remarks Making operand order ASSUMPTIONS.
571 */
572IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
573{
574 iemAImpl_test_u8, NULL,
575 iemAImpl_test_u16, NULL,
576 iemAImpl_test_u32, NULL,
577 iemAImpl_test_u64, NULL
578};
579
580/** Function table for the BT instruction. */
581IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
582{
583 NULL, NULL,
584 iemAImpl_bt_u16, NULL,
585 iemAImpl_bt_u32, NULL,
586 iemAImpl_bt_u64, NULL
587};
588
589/** Function table for the BTC instruction. */
590IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
591{
592 NULL, NULL,
593 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
594 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
595 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
596};
597
598/** Function table for the BTR instruction. */
599IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
600{
601 NULL, NULL,
602 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
603 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
604 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
605};
606
607/** Function table for the BTS instruction. */
608IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
609{
610 NULL, NULL,
611 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
612 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
613 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
614};
615
616/** Function table for the BSF instruction. */
617IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
618{
619 NULL, NULL,
620 iemAImpl_bsf_u16, NULL,
621 iemAImpl_bsf_u32, NULL,
622 iemAImpl_bsf_u64, NULL
623};
624
625/** Function table for the BSR instruction. */
626IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
627{
628 NULL, NULL,
629 iemAImpl_bsr_u16, NULL,
630 iemAImpl_bsr_u32, NULL,
631 iemAImpl_bsr_u64, NULL
632};
633
634/** Function table for the IMUL instruction. */
635IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
636{
637 NULL, NULL,
638 iemAImpl_imul_two_u16, NULL,
639 iemAImpl_imul_two_u32, NULL,
640 iemAImpl_imul_two_u64, NULL
641};
642
643/** Group 1 /r lookup table. */
644IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
645{
646 &g_iemAImpl_add,
647 &g_iemAImpl_or,
648 &g_iemAImpl_adc,
649 &g_iemAImpl_sbb,
650 &g_iemAImpl_and,
651 &g_iemAImpl_sub,
652 &g_iemAImpl_xor,
653 &g_iemAImpl_cmp
654};
655
656/** Function table for the INC instruction. */
657IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
658{
659 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
660 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
661 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
662 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
663};
664
665/** Function table for the DEC instruction. */
666IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
667{
668 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
669 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
670 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
671 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
672};
673
674/** Function table for the NEG instruction. */
675IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
676{
677 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
678 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
679 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
680 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
681};
682
683/** Function table for the NOT instruction. */
684IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
685{
686 iemAImpl_not_u8, iemAImpl_not_u8_locked,
687 iemAImpl_not_u16, iemAImpl_not_u16_locked,
688 iemAImpl_not_u32, iemAImpl_not_u32_locked,
689 iemAImpl_not_u64, iemAImpl_not_u64_locked
690};
691
692
693/** Function table for the ROL instruction. */
694IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
695{
696 iemAImpl_rol_u8,
697 iemAImpl_rol_u16,
698 iemAImpl_rol_u32,
699 iemAImpl_rol_u64
700};
701
702/** Function table for the ROR instruction. */
703IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
704{
705 iemAImpl_ror_u8,
706 iemAImpl_ror_u16,
707 iemAImpl_ror_u32,
708 iemAImpl_ror_u64
709};
710
711/** Function table for the RCL instruction. */
712IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
713{
714 iemAImpl_rcl_u8,
715 iemAImpl_rcl_u16,
716 iemAImpl_rcl_u32,
717 iemAImpl_rcl_u64
718};
719
720/** Function table for the RCR instruction. */
721IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
722{
723 iemAImpl_rcr_u8,
724 iemAImpl_rcr_u16,
725 iemAImpl_rcr_u32,
726 iemAImpl_rcr_u64
727};
728
729/** Function table for the SHL instruction. */
730IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
731{
732 iemAImpl_shl_u8,
733 iemAImpl_shl_u16,
734 iemAImpl_shl_u32,
735 iemAImpl_shl_u64
736};
737
738/** Function table for the SHR instruction. */
739IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
740{
741 iemAImpl_shr_u8,
742 iemAImpl_shr_u16,
743 iemAImpl_shr_u32,
744 iemAImpl_shr_u64
745};
746
747/** Function table for the SAR instruction. */
748IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
749{
750 iemAImpl_sar_u8,
751 iemAImpl_sar_u16,
752 iemAImpl_sar_u32,
753 iemAImpl_sar_u64
754};
755
756
757/** Function table for the MUL instruction. */
758IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
759{
760 iemAImpl_mul_u8,
761 iemAImpl_mul_u16,
762 iemAImpl_mul_u32,
763 iemAImpl_mul_u64
764};
765
766/** Function table for the IMUL instruction working implicitly on rAX. */
767IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
768{
769 iemAImpl_imul_u8,
770 iemAImpl_imul_u16,
771 iemAImpl_imul_u32,
772 iemAImpl_imul_u64
773};
774
775/** Function table for the DIV instruction. */
776IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
777{
778 iemAImpl_div_u8,
779 iemAImpl_div_u16,
780 iemAImpl_div_u32,
781 iemAImpl_div_u64
782};
783
784/** Function table for the IDIV instruction. */
785IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
786{
787 iemAImpl_idiv_u8,
788 iemAImpl_idiv_u16,
789 iemAImpl_idiv_u32,
790 iemAImpl_idiv_u64
791};
792
793/** Function table for the SHLD instruction */
794IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
795{
796 iemAImpl_shld_u16,
797 iemAImpl_shld_u32,
798 iemAImpl_shld_u64,
799};
800
801/** Function table for the SHRD instruction */
802IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
803{
804 iemAImpl_shrd_u16,
805 iemAImpl_shrd_u32,
806 iemAImpl_shrd_u64,
807};
808
809
810/** Function table for the PUNPCKLBW instruction */
811IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
812/** Function table for the PUNPCKLWD instruction */
813IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
814/** Function table for the PUNPCKLDQ instruction */
815IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
816/** Function table for the PUNPCKLQDQ instruction */
817IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
818
819/** Function table for the PUNPCKHBW instruction */
820IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
821/** Function table for the PUNPCKHWD instruction */
822IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
823/** Function table for the PUNPCKHDQ instruction */
824IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
825/** Function table for the PUNPCKHQDQ instruction */
826IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
827
828/** Function table for the PXOR instruction */
829IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
830/** Function table for the PCMPEQB instruction */
831IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
832/** Function table for the PCMPEQW instruction */
833IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
834/** Function table for the PCMPEQD instruction */
835IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
836
837
838#if defined(IEM_LOG_MEMORY_WRITES)
839/** What IEM just wrote. */
840uint8_t g_abIemWrote[256];
841/** How much IEM just wrote. */
842size_t g_cbIemWrote;
843#endif
844
845
846/*********************************************************************************************************************************
847* Internal Functions *
848*********************************************************************************************************************************/
849IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
850IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
851IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
852IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
853/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
854IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
855IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
856IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
857IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
858IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
859IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
860IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
861IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
862IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
863IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
864IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
865IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
866#ifdef IEM_WITH_SETJMP
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
868DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
869DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
870DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
871DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
872#endif
873
874IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
875IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
876IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
880IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
881IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
882IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
883IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
884IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
885IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
886IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
887IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
888IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
889IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
890IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
891
892#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
893IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
894IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
895#endif
896
897/**
898 * Sets the pass up status.
899 *
900 * @returns VINF_SUCCESS.
901 * @param pVCpu The cross context virtual CPU structure of the
902 * calling thread.
903 * @param rcPassUp The pass up status. Must be informational.
904 * VINF_SUCCESS is not allowed.
905 */
906IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
907{
908 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
909
910 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
911 if (rcOldPassUp == VINF_SUCCESS)
912 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
913 /* If both are EM scheduling codes, use EM priority rules. */
914 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
915 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
916 {
917 if (rcPassUp < rcOldPassUp)
918 {
919 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
920 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
921 }
922 else
923 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
924 }
925 /* Override EM scheduling with specific status code. */
926 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
927 {
928 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
929 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
930 }
931 /* Don't override specific status code, first come first served. */
932 else
933 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
934 return VINF_SUCCESS;
935}
936
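/*
 * Illustrative example (not part of the original file): callers typically use
 * this when a PGM access returns an informational status other than
 * VINF_SUCCESS that should not be lost, e.g.
 *
 *     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 *
 * as done by the opcode prefetch code later in this file.
 */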
937
938/**
939 * Calculates the CPU mode.
940 *
941 * This is mainly for updating IEMCPU::enmCpuMode.
942 *
943 * @returns CPU mode.
944 * @param pVCpu The cross context virtual CPU structure of the
945 * calling thread.
946 */
947DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
948{
949 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
950 return IEMMODE_64BIT;
951 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
952 return IEMMODE_32BIT;
953 return IEMMODE_16BIT;
954}
955
956
957/**
958 * Initializes the execution state.
959 *
960 * @param pVCpu The cross context virtual CPU structure of the
961 * calling thread.
962 * @param fBypassHandlers Whether to bypass access handlers.
963 *
964 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
965 * side-effects in strict builds.
966 */
967DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
968{
969 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
970 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
971
972#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
973 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
974 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
975 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
976 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
977 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
978 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
979 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
981#endif
982
983#ifdef VBOX_WITH_RAW_MODE_NOT_R0
984 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
985#endif
986 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
987 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
988#ifdef VBOX_STRICT
989 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
990 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
991 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
992 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
993 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
994 pVCpu->iem.s.uRexReg = 127;
995 pVCpu->iem.s.uRexB = 127;
996 pVCpu->iem.s.uRexIndex = 127;
997 pVCpu->iem.s.iEffSeg = 127;
998 pVCpu->iem.s.idxPrefix = 127;
999 pVCpu->iem.s.uVex3rdReg = 127;
1000 pVCpu->iem.s.uVexLength = 127;
1001 pVCpu->iem.s.fEvexStuff = 127;
1002 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1003# ifdef IEM_WITH_CODE_TLB
1004 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1005 pVCpu->iem.s.pbInstrBuf = NULL;
1006 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1007 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1008 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1009 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1010# else
1011 pVCpu->iem.s.offOpcode = 127;
1012 pVCpu->iem.s.cbOpcode = 127;
1013# endif
1014#endif
1015
1016 pVCpu->iem.s.cActiveMappings = 0;
1017 pVCpu->iem.s.iNextMapping = 0;
1018 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1019 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1020#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1021 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1022 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1023 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1024 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1025 if (!pVCpu->iem.s.fInPatchCode)
1026 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1027#endif
1028}
1029
1030#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1031/**
1032 * Performs a minimal reinitialization of the execution state.
1033 *
1034 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1035 * 'world-switch' type operations on the CPU. Currently only nested
1036 * hardware-virtualization uses it.
1037 *
1038 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1039 */
1040IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1041{
1042 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1043 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1044
1045 pVCpu->iem.s.uCpl = uCpl;
1046 pVCpu->iem.s.enmCpuMode = enmMode;
1047 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1048 pVCpu->iem.s.enmEffAddrMode = enmMode;
1049 if (enmMode != IEMMODE_64BIT)
1050 {
1051 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1052 pVCpu->iem.s.enmEffOpSize = enmMode;
1053 }
1054 else
1055 {
1056 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1057 pVCpu->iem.s.enmEffOpSize = enmMode;
1058 }
1059 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1060#ifndef IEM_WITH_CODE_TLB
1061 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1062 pVCpu->iem.s.offOpcode = 0;
1063 pVCpu->iem.s.cbOpcode = 0;
1064#endif
1065 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1066}
1067#endif
1068
1069/**
1070 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1071 *
1072 * @param pVCpu The cross context virtual CPU structure of the
1073 * calling thread.
1074 */
1075DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1076{
1077 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1078#ifdef VBOX_STRICT
1079# ifdef IEM_WITH_CODE_TLB
1080 NOREF(pVCpu);
1081# else
1082 pVCpu->iem.s.cbOpcode = 0;
1083# endif
1084#else
1085 NOREF(pVCpu);
1086#endif
1087}
1088
1089
1090/**
1091 * Initializes the decoder state.
1092 *
1093 * iemReInitDecoder is mostly a copy of this function.
1094 *
1095 * @param pVCpu The cross context virtual CPU structure of the
1096 * calling thread.
1097 * @param fBypassHandlers Whether to bypass access handlers.
1098 */
1099DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1100{
1101 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1102 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1103
1104#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1105 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1106 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1107 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1108 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1109 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1110 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1111 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1112 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1113#endif
1114
1115#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1116 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1117#endif
1118 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1119 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1120 pVCpu->iem.s.enmCpuMode = enmMode;
1121 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1122 pVCpu->iem.s.enmEffAddrMode = enmMode;
1123 if (enmMode != IEMMODE_64BIT)
1124 {
1125 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1126 pVCpu->iem.s.enmEffOpSize = enmMode;
1127 }
1128 else
1129 {
1130 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1131 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1132 }
1133 pVCpu->iem.s.fPrefixes = 0;
1134 pVCpu->iem.s.uRexReg = 0;
1135 pVCpu->iem.s.uRexB = 0;
1136 pVCpu->iem.s.uRexIndex = 0;
1137 pVCpu->iem.s.idxPrefix = 0;
1138 pVCpu->iem.s.uVex3rdReg = 0;
1139 pVCpu->iem.s.uVexLength = 0;
1140 pVCpu->iem.s.fEvexStuff = 0;
1141 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1142#ifdef IEM_WITH_CODE_TLB
1143 pVCpu->iem.s.pbInstrBuf = NULL;
1144 pVCpu->iem.s.offInstrNextByte = 0;
1145 pVCpu->iem.s.offCurInstrStart = 0;
1146# ifdef VBOX_STRICT
1147 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1148 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1149 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1150# endif
1151#else
1152 pVCpu->iem.s.offOpcode = 0;
1153 pVCpu->iem.s.cbOpcode = 0;
1154#endif
1155 pVCpu->iem.s.cActiveMappings = 0;
1156 pVCpu->iem.s.iNextMapping = 0;
1157 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1158 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1159#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1160 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1161 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1162 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1163 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1164 if (!pVCpu->iem.s.fInPatchCode)
1165 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1166#endif
1167
1168#ifdef DBGFTRACE_ENABLED
1169 switch (enmMode)
1170 {
1171 case IEMMODE_64BIT:
1172 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1173 break;
1174 case IEMMODE_32BIT:
1175 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1176 break;
1177 case IEMMODE_16BIT:
1178 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1179 break;
1180 }
1181#endif
1182}
1183
1184
1185/**
1186 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1187 *
1188 * This is mostly a copy of iemInitDecoder.
1189 *
1190 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1191 */
1192DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1193{
1194 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1195
1196#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1197 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1198 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1199 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1200 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1205#endif
1206
1207 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1208 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1209 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1210 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1211 pVCpu->iem.s.enmEffAddrMode = enmMode;
1212 if (enmMode != IEMMODE_64BIT)
1213 {
1214 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1215 pVCpu->iem.s.enmEffOpSize = enmMode;
1216 }
1217 else
1218 {
1219 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1220 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1221 }
1222 pVCpu->iem.s.fPrefixes = 0;
1223 pVCpu->iem.s.uRexReg = 0;
1224 pVCpu->iem.s.uRexB = 0;
1225 pVCpu->iem.s.uRexIndex = 0;
1226 pVCpu->iem.s.idxPrefix = 0;
1227 pVCpu->iem.s.uVex3rdReg = 0;
1228 pVCpu->iem.s.uVexLength = 0;
1229 pVCpu->iem.s.fEvexStuff = 0;
1230 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1231#ifdef IEM_WITH_CODE_TLB
1232 if (pVCpu->iem.s.pbInstrBuf)
1233 {
1234 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1235 - pVCpu->iem.s.uInstrBufPc;
1236 if (off < pVCpu->iem.s.cbInstrBufTotal)
1237 {
1238 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1239 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1240 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1241 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1242 else
1243 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1244 }
1245 else
1246 {
1247 pVCpu->iem.s.pbInstrBuf = NULL;
1248 pVCpu->iem.s.offInstrNextByte = 0;
1249 pVCpu->iem.s.offCurInstrStart = 0;
1250 pVCpu->iem.s.cbInstrBuf = 0;
1251 pVCpu->iem.s.cbInstrBufTotal = 0;
1252 }
1253 }
1254 else
1255 {
1256 pVCpu->iem.s.offInstrNextByte = 0;
1257 pVCpu->iem.s.offCurInstrStart = 0;
1258 pVCpu->iem.s.cbInstrBuf = 0;
1259 pVCpu->iem.s.cbInstrBufTotal = 0;
1260 }
1261#else
1262 pVCpu->iem.s.cbOpcode = 0;
1263 pVCpu->iem.s.offOpcode = 0;
1264#endif
1265 Assert(pVCpu->iem.s.cActiveMappings == 0);
1266 pVCpu->iem.s.iNextMapping = 0;
1267 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1268 Assert(pVCpu->iem.s.fBypassHandlers == false);
1269#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1270 if (!pVCpu->iem.s.fInPatchCode)
1271 { /* likely */ }
1272 else
1273 {
1274 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1275 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1276 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1277 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1278 if (!pVCpu->iem.s.fInPatchCode)
1279 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1280 }
1281#endif
1282
1283#ifdef DBGFTRACE_ENABLED
1284 switch (enmMode)
1285 {
1286 case IEMMODE_64BIT:
1287 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1288 break;
1289 case IEMMODE_32BIT:
1290 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1291 break;
1292 case IEMMODE_16BIT:
1293 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1294 break;
1295 }
1296#endif
1297}
1298
1299
1300
1301/**
1302 * Prefetch opcodes the first time when starting to execute.
1303 *
1304 * @returns Strict VBox status code.
1305 * @param pVCpu The cross context virtual CPU structure of the
1306 * calling thread.
1307 * @param fBypassHandlers Whether to bypass access handlers.
1308 */
1309IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1310{
1311 iemInitDecoder(pVCpu, fBypassHandlers);
1312
1313#ifdef IEM_WITH_CODE_TLB
1314 /** @todo Do ITLB lookup here. */
1315
1316#else /* !IEM_WITH_CODE_TLB */
1317
1318 /*
1319 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1320 *
1321 * First translate CS:rIP to a physical address.
1322 */
1323 uint32_t cbToTryRead;
1324 RTGCPTR GCPtrPC;
1325 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1326 {
1327 cbToTryRead = PAGE_SIZE;
1328 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1329 if (IEM_IS_CANONICAL(GCPtrPC))
1330 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1331 else
1332 return iemRaiseGeneralProtectionFault0(pVCpu);
1333 }
1334 else
1335 {
1336 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1337 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1338 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1339 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1340 else
1341 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1342 if (cbToTryRead) { /* likely */ }
1343 else /* overflowed */
1344 {
1345 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1346 cbToTryRead = UINT32_MAX;
1347 }
1348 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1349 Assert(GCPtrPC <= UINT32_MAX);
1350 }
1351
1352# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1353 /* Allow interpretation of patch manager code blocks since they can for
1354 instance throw #PFs for perfectly good reasons. */
1355 if (pVCpu->iem.s.fInPatchCode)
1356 {
1357 size_t cbRead = 0;
1358 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1359 AssertRCReturn(rc, rc);
1360 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1361 return VINF_SUCCESS;
1362 }
1363# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1364
1365 RTGCPHYS GCPhys;
1366 uint64_t fFlags;
1367 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1368 if (RT_SUCCESS(rc)) { /* probable */ }
1369 else
1370 {
1371 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1372 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1373 }
1374 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1375 else
1376 {
1377 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1378 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1379 }
1380 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1381 else
1382 {
1383 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1384 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1385 }
1386 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1387 /** @todo Check reserved bits and such stuff. PGM is better at doing
1388 * that, so do it when implementing the guest virtual address
1389 * TLB... */
1390
1391 /*
1392 * Read the bytes at this address.
1393 */
1394 PVM pVM = pVCpu->CTX_SUFF(pVM);
1395# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1396 size_t cbActual;
1397 if ( PATMIsEnabled(pVM)
1398 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1399 {
1400 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1401 Assert(cbActual > 0);
1402 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1403 }
1404 else
1405# endif
1406 {
1407 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1408 if (cbToTryRead > cbLeftOnPage)
1409 cbToTryRead = cbLeftOnPage;
1410 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1411 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1412
1413 if (!pVCpu->iem.s.fBypassHandlers)
1414 {
1415 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1416 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1417 { /* likely */ }
1418 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1419 {
1420 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1421 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1422 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1423 }
1424 else
1425 {
1426 Log((RT_SUCCESS(rcStrict)
1427 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1428 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1429 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1430 return rcStrict;
1431 }
1432 }
1433 else
1434 {
1435 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1436 if (RT_SUCCESS(rc))
1437 { /* likely */ }
1438 else
1439 {
1440 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1441 GCPtrPC, GCPhys, cbToTryRead, rc));
1442 return rc;
1443 }
1444 }
1445 pVCpu->iem.s.cbOpcode = cbToTryRead;
1446 }
1447#endif /* !IEM_WITH_CODE_TLB */
1448 return VINF_SUCCESS;
1449}
1450
1451
1452/**
1453 * Invalidates the IEM TLBs.
1454 *
1455 * This is called internally as well as by PGM when moving GC mappings.
1456 *
1457 *
1458 * @param pVCpu The cross context virtual CPU structure of the calling
1459 * thread.
1460 * @param fVmm Set when PGM calls us with a remapping.
1461 */
1462VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1463{
1464#ifdef IEM_WITH_CODE_TLB
1465 pVCpu->iem.s.cbInstrBufTotal = 0;
1466 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1467 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1468 { /* very likely */ }
1469 else
1470 {
1471 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1472 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1473 while (i-- > 0)
1474 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1475 }
1476#endif
1477
1478#ifdef IEM_WITH_DATA_TLB
1479 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1480 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1481 { /* very likely */ }
1482 else
1483 {
1484 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1485 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1486 while (i-- > 0)
1487 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1488 }
1489#endif
1490 NOREF(pVCpu); NOREF(fVmm);
1491}
1492
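/*
 * Illustrative sketch only: the revision bump above invalidates lazily because
 * a lookup only counts as a hit when the tag carries the current revision as
 * well as the page number (pTlb here standing in for either CodeTlb or DataTlb):
 *
 *      uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pTlb->uTlbRevision;
 *      bool const     fHit = pTlb->aEntries[(uint8_t)uTag].uTag == uTag;
 *
 * Only when the revision counter wraps around to zero do we need the full
 * sweep of aEntries seen above.
 */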
1493
1494/**
1495 * Invalidates a page in the TLBs.
1496 *
1497 * @param pVCpu The cross context virtual CPU structure of the calling
1498 * thread.
1499 * @param   GCPtr       The address of the page to invalidate.
1500 */
1501VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1502{
1503#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1504 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1505 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1506 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1507 uintptr_t idx = (uint8_t)GCPtr;
1508
1509# ifdef IEM_WITH_CODE_TLB
1510 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1511 {
1512 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1513 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1514 pVCpu->iem.s.cbInstrBufTotal = 0;
1515 }
1516# endif
1517
1518# ifdef IEM_WITH_DATA_TLB
1519 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1520 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1521# endif
1522#else
1523 NOREF(pVCpu); NOREF(GCPtr);
1524#endif
1525}
1526
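/*
 * Worked example (arbitrary address, assuming 4KB pages): for
 * GCPtr = 0x00123456 the page number is 0x123, so idx = (uint8_t)0x123 = 0x23
 * and only aEntries[0x23] can possibly hold that page; on top of the page
 * number its tag must also carry the current uTlbRevision to match.
 */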
1527
1528/**
1529 * Invalidates the host physical aspects of the IEM TLBs.
1530 *
1531 * This is called internally as well as by PGM when moving GC mappings.
1532 *
1533 * @param pVCpu The cross context virtual CPU structure of the calling
1534 * thread.
1535 */
1536VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1537{
1538#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1539    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1540
1541# ifdef IEM_WITH_CODE_TLB
1542 pVCpu->iem.s.cbInstrBufTotal = 0;
1543# endif
1544 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1545 if (uTlbPhysRev != 0)
1546 {
1547 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1548 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1549 }
1550 else
1551 {
1552 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1553 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1554
1555 unsigned i;
1556# ifdef IEM_WITH_CODE_TLB
1557 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1558 while (i-- > 0)
1559 {
1560 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1561 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1562 }
1563# endif
1564# ifdef IEM_WITH_DATA_TLB
1565 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1566 while (i-- > 0)
1567 {
1568 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1569 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1570 }
1571# endif
1572 }
1573#else
1574 NOREF(pVCpu);
1575#endif
1576}
1577
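/*
 * Illustrative sketch only: users of the TLB treat the physical side of an
 * entry (pbMappingR3 and the IEMTLBE_F_PG_* flags) as current only while the
 * revision embedded in the entry matches, e.g. for the code TLB:
 *
 *      if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *          { ...pbMappingR3 and the PG_NO_* flags are still usable... }
 *
 * so bumping uTlbPhysRev above forces a PGMPhysIemGCPhys2PtrNoLock refresh the
 * next time each entry is used.
 */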
1578
1579/**
1580 * Invalidates the host physical aspects of the IEM TLBs for all CPUs.
1581 *
1582 * This is called internally as well as by PGM when moving GC mappings.
1583 *
1584 * @param pVM The cross context VM structure.
1585 *
1586 * @remarks Caller holds the PGM lock.
1587 */
1588VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1589{
1590 RT_NOREF_PV(pVM);
1591}
1592
1593#ifdef IEM_WITH_CODE_TLB
1594
1595/**
1596 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1597 * failure and longjmp'ing.
1598 *
1599 * We end up here for a number of reasons:
1600 * - pbInstrBuf isn't yet initialized.
1601 *      - Advancing beyond the buffer boundary (e.g. cross page).
1602 * - Advancing beyond the CS segment limit.
1603 * - Fetching from non-mappable page (e.g. MMIO).
1604 *
1605 * @param pVCpu The cross context virtual CPU structure of the
1606 * calling thread.
1607 * @param pvDst Where to return the bytes.
1608 * @param cbDst Number of bytes to read.
1609 *
1610 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1611 */
1612IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1613{
1614#ifdef IN_RING3
1615 for (;;)
1616 {
1617 Assert(cbDst <= 8);
1618 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1619
1620 /*
1621 * We might have a partial buffer match, deal with that first to make the
1622 * rest simpler. This is the first part of the cross page/buffer case.
1623 */
1624 if (pVCpu->iem.s.pbInstrBuf != NULL)
1625 {
1626 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1627 {
1628 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1629 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1630 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1631
1632 cbDst -= cbCopy;
1633 pvDst = (uint8_t *)pvDst + cbCopy;
1634 offBuf += cbCopy;
1635 pVCpu->iem.s.offInstrNextByte += offBuf;
1636 }
1637 }
1638
1639 /*
1640 * Check segment limit, figuring how much we're allowed to access at this point.
1641 *
1642 * We will fault immediately if RIP is past the segment limit / in non-canonical
1643 * territory. If we do continue, there are one or more bytes to read before we
1644 * end up in trouble and we need to do that first before faulting.
1645 */
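        /*
         * Worked example (assumed numbers): with cs.u32Limit = 0xffff and a
         * current offset GCPtrFirst = 0xfffe, cbMaxRead starts out as 2 and
         * is then clamped further down to what is left on the current page.
         */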
1646 RTGCPTR GCPtrFirst;
1647 uint32_t cbMaxRead;
1648 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1649 {
1650 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1651 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1652 { /* likely */ }
1653 else
1654 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1655 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1656 }
1657 else
1658 {
1659 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1660 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1661 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1662 { /* likely */ }
1663 else
1664 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1665 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1666 if (cbMaxRead != 0)
1667 { /* likely */ }
1668 else
1669 {
1670 /* Overflowed because address is 0 and limit is max. */
1671 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1672 cbMaxRead = X86_PAGE_SIZE;
1673 }
1674 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1675 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1676 if (cbMaxRead2 < cbMaxRead)
1677 cbMaxRead = cbMaxRead2;
1678 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1679 }
1680
1681 /*
1682 * Get the TLB entry for this piece of code.
1683 */
1684 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1685 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1686 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1687 if (pTlbe->uTag == uTag)
1688 {
1689 /* likely when executing lots of code, otherwise unlikely */
1690# ifdef VBOX_WITH_STATISTICS
1691 pVCpu->iem.s.CodeTlb.cTlbHits++;
1692# endif
1693 }
1694 else
1695 {
1696 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1697# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1698 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1699 {
1700 pTlbe->uTag = uTag;
1701 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1702                                  | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1703 pTlbe->GCPhys = NIL_RTGCPHYS;
1704 pTlbe->pbMappingR3 = NULL;
1705 }
1706 else
1707# endif
1708 {
1709 RTGCPHYS GCPhys;
1710 uint64_t fFlags;
1711 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1712 if (RT_FAILURE(rc))
1713 {
1714                Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1715 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1716 }
1717
1718 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1719 pTlbe->uTag = uTag;
1720 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1721 pTlbe->GCPhys = GCPhys;
1722 pTlbe->pbMappingR3 = NULL;
1723 }
1724 }
1725
1726 /*
1727 * Check TLB page table level access flags.
1728 */
1729 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1730 {
1731 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1732 {
1733 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1734 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1735 }
1736 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1737 {
1738                Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1739 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1740 }
1741 }
1742
1743# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1744 /*
1745 * Allow interpretation of patch manager code blocks since they can for
1746 * instance throw #PFs for perfectly good reasons.
1747 */
1748 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1749        { /* likely */ }
1750 else
1751 {
1752            /** @todo Could optimize this a little in ring-3 if we liked. */
1753 size_t cbRead = 0;
1754 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1755 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1756 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1757 return;
1758 }
1759# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1760
1761 /*
1762 * Look up the physical page info if necessary.
1763 */
1764 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1765 { /* not necessary */ }
1766 else
1767 {
1768 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1769 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1770 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1771 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1772 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1773 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1774 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1775 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1776 }
1777
1778# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1779 /*
1780 * Try do a direct read using the pbMappingR3 pointer.
1781 */
1782 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1783 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1784 {
1785 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1786 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1787 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1788 {
1789 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1790 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1791 }
1792 else
1793 {
1794 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1795 Assert(cbInstr < cbMaxRead);
1796 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1797 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1798 }
1799 if (cbDst <= cbMaxRead)
1800 {
1801 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1802 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1803 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1804 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1805 return;
1806 }
1807 pVCpu->iem.s.pbInstrBuf = NULL;
1808
1809 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1810 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1811 }
1812 else
1813# endif
1814#if 0
1815 /*
1816     * If there is no special read handling, we can read a bit more and
1817 * put it in the prefetch buffer.
1818 */
1819 if ( cbDst < cbMaxRead
1820 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1821 {
1822 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1823 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1824 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1825 { /* likely */ }
1826 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1827 {
1828 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1829 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1830 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1831                AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1832 }
1833 else
1834 {
1835 Log((RT_SUCCESS(rcStrict)
1836 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1837 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1838 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1839 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1840 }
1841 }
1842 /*
1843 * Special read handling, so only read exactly what's needed.
1844 * This is a highly unlikely scenario.
1845 */
1846 else
1847#endif
1848 {
1849 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1850 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1851 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1852 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1853 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1854 { /* likely */ }
1855 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1856 {
1857            Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1858                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1859 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1860 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1861 }
1862 else
1863 {
1864 Log((RT_SUCCESS(rcStrict)
1865                 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1866                 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1867                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1868 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1869 }
1870 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1871 if (cbToRead == cbDst)
1872 return;
1873 }
1874
1875 /*
1876 * More to read, loop.
1877 */
1878 cbDst -= cbMaxRead;
1879 pvDst = (uint8_t *)pvDst + cbMaxRead;
1880 }
1881#else
1882 RT_NOREF(pvDst, cbDst);
1883 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1884#endif
1885}
1886
1887#else
1888
1889/**
1890 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1891 * exception if it fails.
1892 *
1893 * @returns Strict VBox status code.
1894 * @param pVCpu The cross context virtual CPU structure of the
1895 * calling thread.
1896 * @param   cbMin               The minimum number of bytes relative to offOpcode
1897 * that must be read.
1898 */
1899IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1900{
1901 /*
1902 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1903 *
1904 * First translate CS:rIP to a physical address.
1905 */
1906 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1907 uint32_t cbToTryRead;
1908 RTGCPTR GCPtrNext;
1909 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1910 {
1911 cbToTryRead = PAGE_SIZE;
1912 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1913 if (!IEM_IS_CANONICAL(GCPtrNext))
1914 return iemRaiseGeneralProtectionFault0(pVCpu);
1915 }
1916 else
1917 {
1918 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1919 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1920 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1921 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1922 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1923 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1924 if (!cbToTryRead) /* overflowed */
1925 {
1926 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1927 cbToTryRead = UINT32_MAX;
1928 /** @todo check out wrapping around the code segment. */
1929 }
1930 if (cbToTryRead < cbMin - cbLeft)
1931 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1932 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1933 }
1934
1935 /* Only read up to the end of the page, and make sure we don't read more
1936 than the opcode buffer can hold. */
1937 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1938 if (cbToTryRead > cbLeftOnPage)
1939 cbToTryRead = cbLeftOnPage;
1940 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1941 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1942/** @todo r=bird: Convert assertion into undefined opcode exception? */
1943 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1944
1945# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1946 /* Allow interpretation of patch manager code blocks since they can for
1947 instance throw #PFs for perfectly good reasons. */
1948 if (pVCpu->iem.s.fInPatchCode)
1949 {
1950 size_t cbRead = 0;
1951 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1952 AssertRCReturn(rc, rc);
1953 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1954 return VINF_SUCCESS;
1955 }
1956# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1957
1958 RTGCPHYS GCPhys;
1959 uint64_t fFlags;
1960 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1961 if (RT_FAILURE(rc))
1962 {
1963 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1964 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1965 }
1966 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1967 {
1968 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1969 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1970 }
1971 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1972 {
1973 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1974 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1975 }
1976 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1977 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1978 /** @todo Check reserved bits and such stuff. PGM is better at doing
1979 * that, so do it when implementing the guest virtual address
1980 * TLB... */
1981
1982 /*
1983 * Read the bytes at this address.
1984 *
1985 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1986 * and since PATM should only patch the start of an instruction there
1987 * should be no need to check again here.
1988 */
1989 if (!pVCpu->iem.s.fBypassHandlers)
1990 {
1991 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1992 cbToTryRead, PGMACCESSORIGIN_IEM);
1993 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1994 { /* likely */ }
1995 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1996 {
1997 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1998                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1999 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2000 }
2001 else
2002 {
2003 Log((RT_SUCCESS(rcStrict)
2004 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2005 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2006                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2007 return rcStrict;
2008 }
2009 }
2010 else
2011 {
2012 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2013 if (RT_SUCCESS(rc))
2014 { /* likely */ }
2015 else
2016 {
2017 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2018 return rc;
2019 }
2020 }
2021 pVCpu->iem.s.cbOpcode += cbToTryRead;
2022 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2023
2024 return VINF_SUCCESS;
2025}
2026
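/*
 * Worked example (assumed numbers): with cbOpcode = 5 bytes already buffered
 * and offOpcode = 4, a decoder that still needs a 16-bit immediate ends up
 * here with cbMin = 2; cbLeft is then 1, so at least one more byte has to be
 * appended to abOpcode before decoding can continue.
 */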
2027#endif /* !IEM_WITH_CODE_TLB */
2028#ifndef IEM_WITH_SETJMP
2029
2030/**
2031 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2032 *
2033 * @returns Strict VBox status code.
2034 * @param pVCpu The cross context virtual CPU structure of the
2035 * calling thread.
2036 * @param pb Where to return the opcode byte.
2037 */
2038DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2039{
2040 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2041 if (rcStrict == VINF_SUCCESS)
2042 {
2043 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2044 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2045 pVCpu->iem.s.offOpcode = offOpcode + 1;
2046 }
2047 else
2048 *pb = 0;
2049 return rcStrict;
2050}
2051
2052
2053/**
2054 * Fetches the next opcode byte.
2055 *
2056 * @returns Strict VBox status code.
2057 * @param pVCpu The cross context virtual CPU structure of the
2058 * calling thread.
2059 * @param pu8 Where to return the opcode byte.
2060 */
2061DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2062{
2063 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2064 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2065 {
2066 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2067 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2068 return VINF_SUCCESS;
2069 }
2070 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2071}
2072
2073#else /* IEM_WITH_SETJMP */
2074
2075/**
2076 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2077 *
2078 * @returns The opcode byte.
2079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2080 */
2081DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2082{
2083# ifdef IEM_WITH_CODE_TLB
2084 uint8_t u8;
2085 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2086 return u8;
2087# else
2088 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2089 if (rcStrict == VINF_SUCCESS)
2090 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2091 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2092# endif
2093}
2094
2095
2096/**
2097 * Fetches the next opcode byte, longjmp on error.
2098 *
2099 * @returns The opcode byte.
2100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2101 */
2102DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2103{
2104# ifdef IEM_WITH_CODE_TLB
2105 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2106 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2107 if (RT_LIKELY( pbBuf != NULL
2108 && offBuf < pVCpu->iem.s.cbInstrBuf))
2109 {
2110 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2111 return pbBuf[offBuf];
2112 }
2113# else
2114 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2115 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2116 {
2117 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2118 return pVCpu->iem.s.abOpcode[offOpcode];
2119 }
2120# endif
2121 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2122}
2123
2124#endif /* IEM_WITH_SETJMP */
2125
2126/**
2127 * Fetches the next opcode byte, returns automatically on failure.
2128 *
2129 * @param a_pu8 Where to return the opcode byte.
2130 * @remark Implicitly references pVCpu.
2131 */
2132#ifndef IEM_WITH_SETJMP
2133# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2134 do \
2135 { \
2136 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2137 if (rcStrict2 == VINF_SUCCESS) \
2138 { /* likely */ } \
2139 else \
2140 return rcStrict2; \
2141 } while (0)
2142#else
2143# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2144#endif /* IEM_WITH_SETJMP */
2145
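/*
 * Illustrative sketch only (iemOpExampleDecodeImm8 is a made-up name, not part
 * of the decoder tables): the macro either stores the byte and falls through,
 * or bails out of the caller: by returning the strict status code in the
 * non-setjmp build and by longjmp'ing in the setjmp build.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemOpExampleDecodeImm8(PVMCPU pVCpu)
{
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bImm);  /* may return/longjmp on fetch failure */
    Log4(("example: imm8=%#x\n", bImm));
    return VINF_SUCCESS;
}
#endif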
2146
2147#ifndef IEM_WITH_SETJMP
2148/**
2149 * Fetches the next signed byte from the opcode stream.
2150 *
2151 * @returns Strict VBox status code.
2152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2153 * @param pi8 Where to return the signed byte.
2154 */
2155DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2156{
2157 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2158}
2159#endif /* !IEM_WITH_SETJMP */
2160
2161
2162/**
2163 * Fetches the next signed byte from the opcode stream, returning automatically
2164 * on failure.
2165 *
2166 * @param a_pi8 Where to return the signed byte.
2167 * @remark Implicitly references pVCpu.
2168 */
2169#ifndef IEM_WITH_SETJMP
2170# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2171 do \
2172 { \
2173 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2174 if (rcStrict2 != VINF_SUCCESS) \
2175 return rcStrict2; \
2176 } while (0)
2177#else /* IEM_WITH_SETJMP */
2178# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2179
2180#endif /* IEM_WITH_SETJMP */
2181
2182#ifndef IEM_WITH_SETJMP
2183
2184/**
2185 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2186 *
2187 * @returns Strict VBox status code.
2188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2189 * @param   pu16                Where to return the opcode word.
2190 */
2191DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2192{
2193 uint8_t u8;
2194 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2195 if (rcStrict == VINF_SUCCESS)
2196 *pu16 = (int8_t)u8;
2197 return rcStrict;
2198}
2199
2200
2201/**
2202 * Fetches the next signed byte from the opcode stream, extending it to
2203 * unsigned 16-bit.
2204 *
2205 * @returns Strict VBox status code.
2206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2207 * @param pu16 Where to return the unsigned word.
2208 */
2209DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2210{
2211 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2212 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2213 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2214
2215 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2216 pVCpu->iem.s.offOpcode = offOpcode + 1;
2217 return VINF_SUCCESS;
2218}
2219
2220#endif /* !IEM_WITH_SETJMP */
2221
2222/**
2223 * Fetches the next signed byte from the opcode stream and sign-extends it to
2224 * a word, returning automatically on failure.
2225 *
2226 * @param a_pu16 Where to return the word.
2227 * @remark Implicitly references pVCpu.
2228 */
2229#ifndef IEM_WITH_SETJMP
2230# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2231 do \
2232 { \
2233 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2234 if (rcStrict2 != VINF_SUCCESS) \
2235 return rcStrict2; \
2236 } while (0)
2237#else
2238# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2239#endif
2240
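/*
 * Illustrative example: if the next opcode byte is 0xfe, the sign-extending
 * fetch above stores 0xfffe (i.e. -2) in the 16-bit destination, so the
 * displacement can simply be added to a 16-bit base with natural wrap-around.
 */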
2241#ifndef IEM_WITH_SETJMP
2242
2243/**
2244 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2245 *
2246 * @returns Strict VBox status code.
2247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2248 * @param pu32 Where to return the opcode dword.
2249 */
2250DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2251{
2252 uint8_t u8;
2253 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2254 if (rcStrict == VINF_SUCCESS)
2255 *pu32 = (int8_t)u8;
2256 return rcStrict;
2257}
2258
2259
2260/**
2261 * Fetches the next signed byte from the opcode stream, extending it to
2262 * unsigned 32-bit.
2263 *
2264 * @returns Strict VBox status code.
2265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2266 * @param pu32 Where to return the unsigned dword.
2267 */
2268DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2269{
2270 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2271 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2272 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2273
2274 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2275 pVCpu->iem.s.offOpcode = offOpcode + 1;
2276 return VINF_SUCCESS;
2277}
2278
2279#endif /* !IEM_WITH_SETJMP */
2280
2281/**
2282 * Fetches the next signed byte from the opcode stream and sign-extends it to
2283 * a double word, returning automatically on failure.
2284 *
2285 * @param   a_pu32              Where to return the double word.
2286 * @remark Implicitly references pVCpu.
2287 */
2288#ifndef IEM_WITH_SETJMP
2289#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2290 do \
2291 { \
2292 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2293 if (rcStrict2 != VINF_SUCCESS) \
2294 return rcStrict2; \
2295 } while (0)
2296#else
2297# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2298#endif
2299
2300#ifndef IEM_WITH_SETJMP
2301
2302/**
2303 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2304 *
2305 * @returns Strict VBox status code.
2306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2307 * @param pu64 Where to return the opcode qword.
2308 */
2309DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2310{
2311 uint8_t u8;
2312 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2313 if (rcStrict == VINF_SUCCESS)
2314 *pu64 = (int8_t)u8;
2315 return rcStrict;
2316}
2317
2318
2319/**
2320 * Fetches the next signed byte from the opcode stream, extending it to
2321 * unsigned 64-bit.
2322 *
2323 * @returns Strict VBox status code.
2324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2325 * @param pu64 Where to return the unsigned qword.
2326 */
2327DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2328{
2329 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2330 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2331 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2332
2333 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2334 pVCpu->iem.s.offOpcode = offOpcode + 1;
2335 return VINF_SUCCESS;
2336}
2337
2338#endif /* !IEM_WITH_SETJMP */
2339
2340
2341/**
2342 * Fetches the next signed byte from the opcode stream and sign-extends it to
2343 * a quad word, returning automatically on failure.
2344 *
2345 * @param   a_pu64              Where to return the quad word.
2346 * @remark Implicitly references pVCpu.
2347 */
2348#ifndef IEM_WITH_SETJMP
2349# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2350 do \
2351 { \
2352 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2353 if (rcStrict2 != VINF_SUCCESS) \
2354 return rcStrict2; \
2355 } while (0)
2356#else
2357# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2358#endif
2359
2360
2361#ifndef IEM_WITH_SETJMP
2362
2363/**
2364 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2365 *
2366 * @returns Strict VBox status code.
2367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2368 * @param pu16 Where to return the opcode word.
2369 */
2370DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2371{
2372 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2373 if (rcStrict == VINF_SUCCESS)
2374 {
2375 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2376# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2377 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2378# else
2379 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2380# endif
2381 pVCpu->iem.s.offOpcode = offOpcode + 2;
2382 }
2383 else
2384 *pu16 = 0;
2385 return rcStrict;
2386}
2387
2388
2389/**
2390 * Fetches the next opcode word.
2391 *
2392 * @returns Strict VBox status code.
2393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2394 * @param pu16 Where to return the opcode word.
2395 */
2396DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2397{
2398 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2399 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2400 {
2401 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2402# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2403 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2404# else
2405 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2406# endif
2407 return VINF_SUCCESS;
2408 }
2409 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2410}
2411
2412#else /* IEM_WITH_SETJMP */
2413
2414/**
2415 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2416 *
2417 * @returns The opcode word.
2418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2419 */
2420DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2421{
2422# ifdef IEM_WITH_CODE_TLB
2423 uint16_t u16;
2424 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2425 return u16;
2426# else
2427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2428 if (rcStrict == VINF_SUCCESS)
2429 {
2430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2431 pVCpu->iem.s.offOpcode += 2;
2432# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2433 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2434# else
2435 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2436# endif
2437 }
2438 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2439# endif
2440}
2441
2442
2443/**
2444 * Fetches the next opcode word, longjmp on error.
2445 *
2446 * @returns The opcode word.
2447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2448 */
2449DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2450{
2451# ifdef IEM_WITH_CODE_TLB
2452 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2453 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2454 if (RT_LIKELY( pbBuf != NULL
2455 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2456 {
2457 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2458# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2459 return *(uint16_t const *)&pbBuf[offBuf];
2460# else
2461 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2462# endif
2463 }
2464# else
2465 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2466 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2467 {
2468 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2469# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2470 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2471# else
2472 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2473# endif
2474 }
2475# endif
2476 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2477}
2478
2479#endif /* IEM_WITH_SETJMP */
2480
2481
2482/**
2483 * Fetches the next opcode word, returns automatically on failure.
2484 *
2485 * @param a_pu16 Where to return the opcode word.
2486 * @remark Implicitly references pVCpu.
2487 */
2488#ifndef IEM_WITH_SETJMP
2489# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2490 do \
2491 { \
2492 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2493 if (rcStrict2 != VINF_SUCCESS) \
2494 return rcStrict2; \
2495 } while (0)
2496#else
2497# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2498#endif
2499
2500#ifndef IEM_WITH_SETJMP
2501
2502/**
2503 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2504 *
2505 * @returns Strict VBox status code.
2506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2507 * @param pu32 Where to return the opcode double word.
2508 */
2509DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2510{
2511 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2512 if (rcStrict == VINF_SUCCESS)
2513 {
2514 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2515 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2516 pVCpu->iem.s.offOpcode = offOpcode + 2;
2517 }
2518 else
2519 *pu32 = 0;
2520 return rcStrict;
2521}
2522
2523
2524/**
2525 * Fetches the next opcode word, zero extending it to a double word.
2526 *
2527 * @returns Strict VBox status code.
2528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2529 * @param pu32 Where to return the opcode double word.
2530 */
2531DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2532{
2533 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2534 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2535 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2536
2537 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2538 pVCpu->iem.s.offOpcode = offOpcode + 2;
2539 return VINF_SUCCESS;
2540}
2541
2542#endif /* !IEM_WITH_SETJMP */
2543
2544
2545/**
2546 * Fetches the next opcode word and zero extends it to a double word, returns
2547 * automatically on failure.
2548 *
2549 * @param a_pu32 Where to return the opcode double word.
2550 * @remark Implicitly references pVCpu.
2551 */
2552#ifndef IEM_WITH_SETJMP
2553# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2554 do \
2555 { \
2556 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2557 if (rcStrict2 != VINF_SUCCESS) \
2558 return rcStrict2; \
2559 } while (0)
2560#else
2561# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2562#endif
2563
2564#ifndef IEM_WITH_SETJMP
2565
2566/**
2567 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2568 *
2569 * @returns Strict VBox status code.
2570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2571 * @param pu64 Where to return the opcode quad word.
2572 */
2573DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2574{
2575 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2576 if (rcStrict == VINF_SUCCESS)
2577 {
2578 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2579 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2580 pVCpu->iem.s.offOpcode = offOpcode + 2;
2581 }
2582 else
2583 *pu64 = 0;
2584 return rcStrict;
2585}
2586
2587
2588/**
2589 * Fetches the next opcode word, zero extending it to a quad word.
2590 *
2591 * @returns Strict VBox status code.
2592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2593 * @param pu64 Where to return the opcode quad word.
2594 */
2595DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2596{
2597 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2598 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2599 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2600
2601 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2602 pVCpu->iem.s.offOpcode = offOpcode + 2;
2603 return VINF_SUCCESS;
2604}
2605
2606#endif /* !IEM_WITH_SETJMP */
2607
2608/**
2609 * Fetches the next opcode word and zero extends it to a quad word, returns
2610 * automatically on failure.
2611 *
2612 * @param a_pu64 Where to return the opcode quad word.
2613 * @remark Implicitly references pVCpu.
2614 */
2615#ifndef IEM_WITH_SETJMP
2616# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2617 do \
2618 { \
2619 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2620 if (rcStrict2 != VINF_SUCCESS) \
2621 return rcStrict2; \
2622 } while (0)
2623#else
2624# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2625#endif
2626
2627
2628#ifndef IEM_WITH_SETJMP
2629/**
2630 * Fetches the next signed word from the opcode stream.
2631 *
2632 * @returns Strict VBox status code.
2633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2634 * @param pi16 Where to return the signed word.
2635 */
2636DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2637{
2638 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2639}
2640#endif /* !IEM_WITH_SETJMP */
2641
2642
2643/**
2644 * Fetches the next signed word from the opcode stream, returning automatically
2645 * on failure.
2646 *
2647 * @param a_pi16 Where to return the signed word.
2648 * @remark Implicitly references pVCpu.
2649 */
2650#ifndef IEM_WITH_SETJMP
2651# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2652 do \
2653 { \
2654 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2655 if (rcStrict2 != VINF_SUCCESS) \
2656 return rcStrict2; \
2657 } while (0)
2658#else
2659# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2660#endif
2661
2662#ifndef IEM_WITH_SETJMP
2663
2664/**
2665 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2666 *
2667 * @returns Strict VBox status code.
2668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2669 * @param pu32 Where to return the opcode dword.
2670 */
2671DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2672{
2673 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2674 if (rcStrict == VINF_SUCCESS)
2675 {
2676 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2677# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2678 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2679# else
2680 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2681 pVCpu->iem.s.abOpcode[offOpcode + 1],
2682 pVCpu->iem.s.abOpcode[offOpcode + 2],
2683 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2684# endif
2685 pVCpu->iem.s.offOpcode = offOpcode + 4;
2686 }
2687 else
2688 *pu32 = 0;
2689 return rcStrict;
2690}
2691
2692
2693/**
2694 * Fetches the next opcode dword.
2695 *
2696 * @returns Strict VBox status code.
2697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2698 * @param pu32 Where to return the opcode double word.
2699 */
2700DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2701{
2702 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2703 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2704 {
2705 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2706# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2707 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2708# else
2709 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2710 pVCpu->iem.s.abOpcode[offOpcode + 1],
2711 pVCpu->iem.s.abOpcode[offOpcode + 2],
2712 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2713# endif
2714 return VINF_SUCCESS;
2715 }
2716 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2717}
2718
2719#else /* IEM_WITH_SETJMP */
2720
2721/**
2722 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2723 *
2724 * @returns The opcode dword.
2725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2726 */
2727DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2728{
2729# ifdef IEM_WITH_CODE_TLB
2730 uint32_t u32;
2731 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2732 return u32;
2733# else
2734 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2735 if (rcStrict == VINF_SUCCESS)
2736 {
2737 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2738 pVCpu->iem.s.offOpcode = offOpcode + 4;
2739# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2740 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2741# else
2742 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2743 pVCpu->iem.s.abOpcode[offOpcode + 1],
2744 pVCpu->iem.s.abOpcode[offOpcode + 2],
2745 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2746# endif
2747 }
2748 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2749# endif
2750}
2751
2752
2753/**
2754 * Fetches the next opcode dword, longjmp on error.
2755 *
2756 * @returns The opcode dword.
2757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2758 */
2759DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2760{
2761# ifdef IEM_WITH_CODE_TLB
2762 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2763 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2764 if (RT_LIKELY( pbBuf != NULL
2765 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2766 {
2767 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2768# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2769 return *(uint32_t const *)&pbBuf[offBuf];
2770# else
2771 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2772 pbBuf[offBuf + 1],
2773 pbBuf[offBuf + 2],
2774 pbBuf[offBuf + 3]);
2775# endif
2776 }
2777# else
2778 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2779 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2780 {
2781 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2782# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2783 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2784# else
2785 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2786 pVCpu->iem.s.abOpcode[offOpcode + 1],
2787 pVCpu->iem.s.abOpcode[offOpcode + 2],
2788 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2789# endif
2790 }
2791# endif
2792 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2793}
2794
2795#endif /* IEM_WITH_SETJMP */
2796
2797
2798/**
2799 * Fetches the next opcode dword, returns automatically on failure.
2800 *
2801 * @param a_pu32 Where to return the opcode dword.
2802 * @remark Implicitly references pVCpu.
2803 */
2804#ifndef IEM_WITH_SETJMP
2805# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2806 do \
2807 { \
2808 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2809 if (rcStrict2 != VINF_SUCCESS) \
2810 return rcStrict2; \
2811 } while (0)
2812#else
2813# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2814#endif
2815
2816#ifndef IEM_WITH_SETJMP
2817
2818/**
2819 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2820 *
2821 * @returns Strict VBox status code.
2822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2823 * @param   pu64                Where to return the opcode qword.
2824 */
2825DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2826{
2827 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2828 if (rcStrict == VINF_SUCCESS)
2829 {
2830 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2831 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2832 pVCpu->iem.s.abOpcode[offOpcode + 1],
2833 pVCpu->iem.s.abOpcode[offOpcode + 2],
2834 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2835 pVCpu->iem.s.offOpcode = offOpcode + 4;
2836 }
2837 else
2838 *pu64 = 0;
2839 return rcStrict;
2840}
2841
2842
2843/**
2844 * Fetches the next opcode dword, zero extending it to a quad word.
2845 *
2846 * @returns Strict VBox status code.
2847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2848 * @param pu64 Where to return the opcode quad word.
2849 */
2850DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2851{
2852 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2853 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2854 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2855
2856 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2857 pVCpu->iem.s.abOpcode[offOpcode + 1],
2858 pVCpu->iem.s.abOpcode[offOpcode + 2],
2859 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2860 pVCpu->iem.s.offOpcode = offOpcode + 4;
2861 return VINF_SUCCESS;
2862}
2863
2864#endif /* !IEM_WITH_SETJMP */
2865
2866
2867/**
2868 * Fetches the next opcode dword and zero extends it to a quad word, returns
2869 * automatically on failure.
2870 *
2871 * @param a_pu64 Where to return the opcode quad word.
2872 * @remark Implicitly references pVCpu.
2873 */
2874#ifndef IEM_WITH_SETJMP
2875# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2876 do \
2877 { \
2878 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2879 if (rcStrict2 != VINF_SUCCESS) \
2880 return rcStrict2; \
2881 } while (0)
2882#else
2883# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2884#endif
2885
2886
2887#ifndef IEM_WITH_SETJMP
2888/**
2889 * Fetches the next signed double word from the opcode stream.
2890 *
2891 * @returns Strict VBox status code.
2892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2893 * @param pi32 Where to return the signed double word.
2894 */
2895DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2896{
2897 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2898}
2899#endif
2900
2901/**
2902 * Fetches the next signed double word from the opcode stream, returning
2903 * automatically on failure.
2904 *
2905 * @param a_pi32 Where to return the signed double word.
2906 * @remark Implicitly references pVCpu.
2907 */
2908#ifndef IEM_WITH_SETJMP
2909# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2910 do \
2911 { \
2912 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2913 if (rcStrict2 != VINF_SUCCESS) \
2914 return rcStrict2; \
2915 } while (0)
2916#else
2917# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2918#endif
2919
2920#ifndef IEM_WITH_SETJMP
2921
2922/**
2923 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2924 *
2925 * @returns Strict VBox status code.
2926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2927 * @param pu64 Where to return the opcode qword.
2928 */
2929DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2930{
2931 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2932 if (rcStrict == VINF_SUCCESS)
2933 {
2934 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2935 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2936 pVCpu->iem.s.abOpcode[offOpcode + 1],
2937 pVCpu->iem.s.abOpcode[offOpcode + 2],
2938 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2939 pVCpu->iem.s.offOpcode = offOpcode + 4;
2940 }
2941 else
2942 *pu64 = 0;
2943 return rcStrict;
2944}
2945
2946
2947/**
2948 * Fetches the next opcode dword, sign extending it into a quad word.
2949 *
2950 * @returns Strict VBox status code.
2951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2952 * @param pu64 Where to return the opcode quad word.
2953 */
2954DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2955{
2956 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2957 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2958 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2959
2960 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2961 pVCpu->iem.s.abOpcode[offOpcode + 1],
2962 pVCpu->iem.s.abOpcode[offOpcode + 2],
2963 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2964 *pu64 = i32;
2965 pVCpu->iem.s.offOpcode = offOpcode + 4;
2966 return VINF_SUCCESS;
2967}
2968
2969#endif /* !IEM_WITH_SETJMP */
2970
2971
2972/**
2973 * Fetches the next opcode double word and sign extends it to a quad word,
2974 * returns automatically on failure.
2975 *
2976 * @param a_pu64 Where to return the opcode quad word.
2977 * @remark Implicitly references pVCpu.
2978 */
2979#ifndef IEM_WITH_SETJMP
2980# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2981 do \
2982 { \
2983 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2984 if (rcStrict2 != VINF_SUCCESS) \
2985 return rcStrict2; \
2986 } while (0)
2987#else
2988# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2989#endif
2990
2991#ifndef IEM_WITH_SETJMP
2992
2993/**
2994 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2995 *
2996 * @returns Strict VBox status code.
2997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2998 * @param pu64 Where to return the opcode qword.
2999 */
3000DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3001{
3002 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3003 if (rcStrict == VINF_SUCCESS)
3004 {
3005 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3006# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3007 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3008# else
3009 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3010 pVCpu->iem.s.abOpcode[offOpcode + 1],
3011 pVCpu->iem.s.abOpcode[offOpcode + 2],
3012 pVCpu->iem.s.abOpcode[offOpcode + 3],
3013 pVCpu->iem.s.abOpcode[offOpcode + 4],
3014 pVCpu->iem.s.abOpcode[offOpcode + 5],
3015 pVCpu->iem.s.abOpcode[offOpcode + 6],
3016 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3017# endif
3018 pVCpu->iem.s.offOpcode = offOpcode + 8;
3019 }
3020 else
3021 *pu64 = 0;
3022 return rcStrict;
3023}
3024
3025
3026/**
3027 * Fetches the next opcode qword.
3028 *
3029 * @returns Strict VBox status code.
3030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3031 * @param pu64 Where to return the opcode qword.
3032 */
3033DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3034{
3035 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3036 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3037 {
3038# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3039 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3040# else
3041 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3042 pVCpu->iem.s.abOpcode[offOpcode + 1],
3043 pVCpu->iem.s.abOpcode[offOpcode + 2],
3044 pVCpu->iem.s.abOpcode[offOpcode + 3],
3045 pVCpu->iem.s.abOpcode[offOpcode + 4],
3046 pVCpu->iem.s.abOpcode[offOpcode + 5],
3047 pVCpu->iem.s.abOpcode[offOpcode + 6],
3048 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3049# endif
3050 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3051 return VINF_SUCCESS;
3052 }
3053 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3054}
3055
3056#else /* IEM_WITH_SETJMP */
3057
3058/**
3059 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3060 *
3061 * @returns The opcode qword.
3062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3063 */
3064DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3065{
3066# ifdef IEM_WITH_CODE_TLB
3067 uint64_t u64;
3068 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3069 return u64;
3070# else
3071 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3072 if (rcStrict == VINF_SUCCESS)
3073 {
3074 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3075 pVCpu->iem.s.offOpcode = offOpcode + 8;
3076# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3077 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3078# else
3079 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3080 pVCpu->iem.s.abOpcode[offOpcode + 1],
3081 pVCpu->iem.s.abOpcode[offOpcode + 2],
3082 pVCpu->iem.s.abOpcode[offOpcode + 3],
3083 pVCpu->iem.s.abOpcode[offOpcode + 4],
3084 pVCpu->iem.s.abOpcode[offOpcode + 5],
3085 pVCpu->iem.s.abOpcode[offOpcode + 6],
3086 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3087# endif
3088 }
3089 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3090# endif
3091}
3092
3093
3094/**
3095 * Fetches the next opcode qword, longjmp on error.
3096 *
3097 * @returns The opcode qword.
3098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3099 */
3100DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3101{
3102# ifdef IEM_WITH_CODE_TLB
3103 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3104 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3105 if (RT_LIKELY( pbBuf != NULL
3106 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3107 {
3108 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3109# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3110 return *(uint64_t const *)&pbBuf[offBuf];
3111# else
3112 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3113 pbBuf[offBuf + 1],
3114 pbBuf[offBuf + 2],
3115 pbBuf[offBuf + 3],
3116 pbBuf[offBuf + 4],
3117 pbBuf[offBuf + 5],
3118 pbBuf[offBuf + 6],
3119 pbBuf[offBuf + 7]);
3120# endif
3121 }
3122# else
3123 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3124 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3125 {
3126 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3127# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3128 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3129# else
3130 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3131 pVCpu->iem.s.abOpcode[offOpcode + 1],
3132 pVCpu->iem.s.abOpcode[offOpcode + 2],
3133 pVCpu->iem.s.abOpcode[offOpcode + 3],
3134 pVCpu->iem.s.abOpcode[offOpcode + 4],
3135 pVCpu->iem.s.abOpcode[offOpcode + 5],
3136 pVCpu->iem.s.abOpcode[offOpcode + 6],
3137 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3138# endif
3139 }
3140# endif
3141 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3142}
3143
3144#endif /* IEM_WITH_SETJMP */
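/*
 * Illustrative sketch (not part of the build): what the RT_MAKE_U64_FROM_U8
 * construction in the fetchers above amounts to, namely little-endian assembly
 * of eight opcode bytes.  The function and parameter names below are made up
 * for illustration only.
 */
#if 0
# include <stdint.h>
static uint64_t iemExampleMakeU64(uint8_t const *pb)
{
    /* Byte 0 is the least significant byte, matching x86 instruction encoding. */
    return (uint64_t)pb[0]
         | ((uint64_t)pb[1] <<  8)
         | ((uint64_t)pb[2] << 16)
         | ((uint64_t)pb[3] << 24)
         | ((uint64_t)pb[4] << 32)
         | ((uint64_t)pb[5] << 40)
         | ((uint64_t)pb[6] << 48)
         | ((uint64_t)pb[7] << 56);
}
#endif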
3145
3146/**
3147 * Fetches the next opcode quad word, returns automatically on failure.
3148 *
3149 * @param a_pu64 Where to return the opcode quad word.
3150 * @remark Implicitly references pVCpu.
3151 */
3152#ifndef IEM_WITH_SETJMP
3153# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3154 do \
3155 { \
3156 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3157 if (rcStrict2 != VINF_SUCCESS) \
3158 return rcStrict2; \
3159 } while (0)
3160#else
3161# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3162#endif
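/*
 * Usage sketch (hypothetical decoder helper, not part of the build): how a
 * decode routine would consume a 64-bit immediate via the macro above.  In the
 * non-setjmp configuration the macro returns from the caller on fetch failure,
 * so the caller must return VBOXSTRICTRC and have 'pVCpu' in scope.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleFetchImm64(PVMCPU pVCpu, uint64_t *puImm64)
{
    uint64_t uImm64;
    IEM_OPCODE_GET_NEXT_U64(&uImm64);   /* returns (or longjmps) on failure */
    *puImm64 = uImm64;
    return VINF_SUCCESS;
}
#endif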
3163
3164
3165/** @name Misc Worker Functions.
3166 * @{
3167 */
3168
3169/**
3170 * Gets the exception class for the specified exception vector.
3171 *
3172 * @returns The class of the specified exception.
3173 * @param uVector The exception vector.
3174 */
3175IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3176{
3177 Assert(uVector <= X86_XCPT_LAST);
3178 switch (uVector)
3179 {
3180 case X86_XCPT_DE:
3181 case X86_XCPT_TS:
3182 case X86_XCPT_NP:
3183 case X86_XCPT_SS:
3184 case X86_XCPT_GP:
3185 case X86_XCPT_SX: /* AMD only */
3186 return IEMXCPTCLASS_CONTRIBUTORY;
3187
3188 case X86_XCPT_PF:
3189 case X86_XCPT_VE: /* Intel only */
3190 return IEMXCPTCLASS_PAGE_FAULT;
3191
3192 case X86_XCPT_DF:
3193 return IEMXCPTCLASS_DOUBLE_FAULT;
3194 }
3195 return IEMXCPTCLASS_BENIGN;
3196}
3197
3198
3199/**
3200 * Evaluates how to handle an exception caused during delivery of another event
3201 * (exception / interrupt).
3202 *
3203 * @returns How to handle the recursive exception.
3204 * @param pVCpu The cross context virtual CPU structure of the
3205 * calling thread.
3206 * @param fPrevFlags The flags of the previous event.
3207 * @param uPrevVector The vector of the previous event.
3208 * @param fCurFlags The flags of the current exception.
3209 * @param uCurVector The vector of the current exception.
3210 * @param pfXcptRaiseInfo Where to store additional information about the
3211 * exception condition. Optional.
3212 */
3213VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3214 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3215{
3216 /*
3217 * Only CPU exceptions can be raised while delivering other events; software-interrupt
3218 * generated exceptions (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3219 */
3220 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3221 Assert(pVCpu); RT_NOREF(pVCpu);
3222 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3223
3224 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3225 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3226 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3227 {
3228 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3229 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3230 {
3231 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3232 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3233 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3234 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3235 {
3236 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3237 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3238 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3239 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3240 uCurVector, pVCpu->cpum.GstCtx.cr2));
3241 }
3242 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3243 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3244 {
3245 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3246 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3247 }
3248 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3249 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3250 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3251 {
3252 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3253 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3254 }
3255 }
3256 else
3257 {
3258 if (uPrevVector == X86_XCPT_NMI)
3259 {
3260 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3261 if (uCurVector == X86_XCPT_PF)
3262 {
3263 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3264 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3265 }
3266 }
3267 else if ( uPrevVector == X86_XCPT_AC
3268 && uCurVector == X86_XCPT_AC)
3269 {
3270 enmRaise = IEMXCPTRAISE_CPU_HANG;
3271 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3272 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3273 }
3274 }
3275 }
3276 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3277 {
3278 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3279 if (uCurVector == X86_XCPT_PF)
3280 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3281 }
3282 else
3283 {
3284 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3285 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3286 }
3287
3288 if (pfXcptRaiseInfo)
3289 *pfXcptRaiseInfo = fRaiseInfo;
3290 return enmRaise;
3291}
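/*
 * Usage sketch (inside a hypothetical caller, not part of the build): per the
 * logic above, a contributory exception (#GP) raised while delivering another
 * contributory exception (#NP) is promoted to a double fault, whereas a #PF
 * raised while delivering a #GP is simply delivered as the current exception.
 */
#if 0
IEMXCPTRAISEINFO fRaiseInfo;
IEMXCPTRAISE     enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
                                                     &fRaiseInfo);
Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
#endif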
3292
3293
3294/**
3295 * Enters the CPU shutdown state initiated by a triple fault or other
3296 * unrecoverable conditions.
3297 *
3298 * @returns Strict VBox status code.
3299 * @param pVCpu The cross context virtual CPU structure of the
3300 * calling thread.
3301 */
3302IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3303{
3304 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3305 {
3306 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3307 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3308 }
3309
3310 RT_NOREF(pVCpu);
3311 return VINF_EM_TRIPLE_FAULT;
3312}
3313
3314
3315/**
3316 * Validates a new SS segment.
3317 *
3318 * @returns VBox strict status code.
3319 * @param pVCpu The cross context virtual CPU structure of the
3320 * calling thread.
3321 * @param NewSS The new SS selector.
3322 * @param uCpl The CPL to load the stack for.
3323 * @param pDesc Where to return the descriptor.
3324 */
3325IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3326{
3327 /* Null selectors are not allowed (we're not called for dispatching
3328 interrupts with SS=0 in long mode). */
3329 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3330 {
3331 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3332 return iemRaiseTaskSwitchFault0(pVCpu);
3333 }
3334
3335 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3336 if ((NewSS & X86_SEL_RPL) != uCpl)
3337 {
3338 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3339 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3340 }
3341
3342 /*
3343 * Read the descriptor.
3344 */
3345 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3346 if (rcStrict != VINF_SUCCESS)
3347 return rcStrict;
3348
3349 /*
3350 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3351 */
3352 if (!pDesc->Legacy.Gen.u1DescType)
3353 {
3354 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3355 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3356 }
3357
3358 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3359 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3360 {
3361 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3362 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3363 }
3364 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3365 {
3366 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3367 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3368 }
3369
3370 /* Is it there? */
3371 /** @todo testcase: Is this checked before the canonical / limit check below? */
3372 if (!pDesc->Legacy.Gen.u1Present)
3373 {
3374 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3375 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3376 }
3377
3378 return VINF_SUCCESS;
3379}
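/*
 * Standalone sketch (not part of the build) of the SS descriptor type rule
 * enforced above: a stack segment must be a writable data segment.  The helper
 * name is made up; the bit values match X86_SEL_TYPE_CODE (8) and
 * X86_SEL_TYPE_WRITE (2).
 */
#if 0
static int iemExampleIsWritableDataSeg(unsigned u1DescType, unsigned u4Type)
{
    if (!u1DescType)            /* system descriptor -> not usable as SS */
        return 0;
    if (u4Type & 0x8)           /* code segment -> not usable as SS */
        return 0;
    if (!(u4Type & 0x2))        /* read-only data -> not usable as SS */
        return 0;
    return 1;
}
#endif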
3380
3381
3382/**
3383 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3384 * not.
3385 *
3386 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3387 */
3388#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3389# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3390#else
3391# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3392#endif
3393
3394/**
3395 * Updates the EFLAGS in the correct manner wrt. PATM.
3396 *
3397 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3398 * @param a_fEfl The new EFLAGS.
3399 */
3400#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3401# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3402#else
3403# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3404#endif
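/*
 * Usage sketch (inside a hypothetical caller, not part of the build): event
 * delivery reads EFLAGS through the wrapper, masks the bits it must clear and
 * writes the result back, so the PATM-aware and plain variants behave
 * identically (this mirrors the real-mode path further down).
 */
#if 0
uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
IEMMISC_SET_EFL(pVCpu, fEfl);
#endif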
3405
3406
3407/** @} */
3408
3409/** @name Raising Exceptions.
3410 *
3411 * @{
3412 */
3413
3414
3415/**
3416 * Loads the specified stack far pointer from the TSS.
3417 *
3418 * @returns VBox strict status code.
3419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3420 * @param uCpl The CPL to load the stack for.
3421 * @param pSelSS Where to return the new stack segment.
3422 * @param puEsp Where to return the new stack pointer.
3423 */
3424IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3425{
3426 VBOXSTRICTRC rcStrict;
3427 Assert(uCpl < 4);
3428
3429 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3430 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3431 {
3432 /*
3433 * 16-bit TSS (X86TSS16).
3434 */
3435 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3436 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3437 {
3438 uint32_t off = uCpl * 4 + 2;
3439 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3440 {
3441 /** @todo check actual access pattern here. */
3442 uint32_t u32Tmp = 0; /* gcc maybe... */
3443 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3444 if (rcStrict == VINF_SUCCESS)
3445 {
3446 *puEsp = RT_LOWORD(u32Tmp);
3447 *pSelSS = RT_HIWORD(u32Tmp);
3448 return VINF_SUCCESS;
3449 }
3450 }
3451 else
3452 {
3453 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3454 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3455 }
3456 break;
3457 }
3458
3459 /*
3460 * 32-bit TSS (X86TSS32).
3461 */
3462 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3463 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3464 {
3465 uint32_t off = uCpl * 8 + 4;
3466 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3467 {
3468 /** @todo check actual access pattern here. */
3469 uint64_t u64Tmp;
3470 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3471 if (rcStrict == VINF_SUCCESS)
3472 {
3473 *puEsp = u64Tmp & UINT32_MAX;
3474 *pSelSS = (RTSEL)(u64Tmp >> 32);
3475 return VINF_SUCCESS;
3476 }
3477 }
3478 else
3479 {
3480 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3481 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3482 }
3483 break;
3484 }
3485
3486 default:
3487 AssertFailed();
3488 rcStrict = VERR_IEM_IPE_4;
3489 break;
3490 }
3491
3492 *puEsp = 0; /* make gcc happy */
3493 *pSelSS = 0; /* make gcc happy */
3494 return rcStrict;
3495}
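/*
 * Standalone sketch (not part of the build) of the TSS offsets computed above:
 * the 16-bit TSS keeps sp/ss pairs (4 bytes each) starting at offset 2, the
 * 32-bit TSS keeps esp/ss pairs (8 bytes each) starting at offset 4.  Helper
 * names are made up for illustration.
 */
#if 0
# include <stdint.h>
static uint32_t iemExampleTss16StackOff(uint8_t uCpl) { return uCpl * 4 + 2; }
static uint32_t iemExampleTss32StackOff(uint8_t uCpl) { return uCpl * 8 + 4; }
/* e.g. the ring-1 stack in a 32-bit TSS: esp1 at offset 12, ss1 at offset 16. */
#endif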
3496
3497
3498/**
3499 * Loads the specified stack pointer from the 64-bit TSS.
3500 *
3501 * @returns VBox strict status code.
3502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3503 * @param uCpl The CPL to load the stack for.
3504 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3505 * @param puRsp Where to return the new stack pointer.
3506 */
3507IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3508{
3509 Assert(uCpl < 4);
3510 Assert(uIst < 8);
3511 *puRsp = 0; /* make gcc happy */
3512
3513 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3514 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3515
3516 uint32_t off;
3517 if (uIst)
3518 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3519 else
3520 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3521 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3522 {
3523 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3524 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3525 }
3526
3527 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3528}
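/*
 * Usage sketch (inside a hypothetical caller, with made-up values, not part of
 * the build): an interrupt gate with a non-zero IST index ignores the target
 * CPL and always loads the corresponding interrupt stack from the 64-bit TSS.
 */
#if 0
uint64_t     uNewRsp   = 0;
VBOXSTRICTRC rcStrict2 = iemRaiseLoadStackFromTss64(pVCpu, 0 /*uCpl*/, 1 /*uIst*/, &uNewRsp);
#endif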
3529
3530
3531/**
3532 * Adjust the CPU state according to the exception being raised.
3533 *
3534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3535 * @param u8Vector The exception that has been raised.
3536 */
3537DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3538{
3539 switch (u8Vector)
3540 {
3541 case X86_XCPT_DB:
3542 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3543 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3544 break;
3545 /** @todo Read the AMD and Intel exception reference... */
3546 }
3547}
3548
3549
3550/**
3551 * Implements exceptions and interrupts for real mode.
3552 *
3553 * @returns VBox strict status code.
3554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3555 * @param cbInstr The number of bytes to offset rIP by in the return
3556 * address.
3557 * @param u8Vector The interrupt / exception vector number.
3558 * @param fFlags The flags.
3559 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3560 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3561 */
3562IEM_STATIC VBOXSTRICTRC
3563iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3564 uint8_t cbInstr,
3565 uint8_t u8Vector,
3566 uint32_t fFlags,
3567 uint16_t uErr,
3568 uint64_t uCr2)
3569{
3570 NOREF(uErr); NOREF(uCr2);
3571 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3572
3573 /*
3574 * Read the IDT entry.
3575 */
3576 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3577 {
3578 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3579 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3580 }
3581 RTFAR16 Idte;
3582 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3583 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3584 {
3585 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3586 return rcStrict;
3587 }
3588
3589 /*
3590 * Push the stack frame.
3591 */
3592 uint16_t *pu16Frame;
3593 uint64_t uNewRsp;
3594 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3595 if (rcStrict != VINF_SUCCESS)
3596 return rcStrict;
3597
3598 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3599#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3600 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3601 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3602 fEfl |= UINT16_C(0xf000);
3603#endif
3604 pu16Frame[2] = (uint16_t)fEfl;
3605 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3606 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3607 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3608 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3609 return rcStrict;
3610
3611 /*
3612 * Load the vector address into cs:ip and make exception specific state
3613 * adjustments.
3614 */
3615 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3616 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3617 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3618 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3619 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3620 pVCpu->cpum.GstCtx.rip = Idte.off;
3621 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3622 IEMMISC_SET_EFL(pVCpu, fEfl);
3623
3624 /** @todo do we actually do this in real mode? */
3625 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3626 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3627
3628 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3629}
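/*
 * Standalone sketch (not part of the build) of the real-mode vectoring math
 * used above: each IVT slot is 4 bytes (IP then CS), and the handler's flat
 * address is CS shifted left by 4 plus IP.  Helper and parameter names are
 * made up for illustration.
 */
#if 0
# include <stdint.h>
static uint32_t iemExampleRealModeHandlerAddr(uint8_t const *pbIvt, uint8_t bVector)
{
    uint16_t uIp = (uint16_t)(pbIvt[bVector * 4 + 0] | (pbIvt[bVector * 4 + 1] << 8));
    uint16_t uCs = (uint16_t)(pbIvt[bVector * 4 + 2] | (pbIvt[bVector * 4 + 3] << 8));
    return ((uint32_t)uCs << 4) + uIp;  /* mirrors cs.u64Base = sel << 4; rip = off above */
}
#endif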
3630
3631
3632/**
3633 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3634 *
3635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3636 * @param pSReg Pointer to the segment register.
3637 */
3638IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3639{
3640 pSReg->Sel = 0;
3641 pSReg->ValidSel = 0;
3642 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3643 {
3644 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
3645 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3646 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3647 }
3648 else
3649 {
3650 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3651 /** @todo check this on AMD-V */
3652 pSReg->u64Base = 0;
3653 pSReg->u32Limit = 0;
3654 }
3655}
3656
3657
3658/**
3659 * Loads a segment selector during a task switch in V8086 mode.
3660 *
3661 * @param pSReg Pointer to the segment register.
3662 * @param uSel The selector value to load.
3663 */
3664IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3665{
3666 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3667 pSReg->Sel = uSel;
3668 pSReg->ValidSel = uSel;
3669 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3670 pSReg->u64Base = uSel << 4;
3671 pSReg->u32Limit = 0xffff;
3672 pSReg->Attr.u = 0xf3;
3673}
3674
3675
3676/**
3677 * Loads a NULL data selector into a selector register, both the hidden and
3678 * visible parts, in protected mode.
3679 *
3680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3681 * @param pSReg Pointer to the segment register.
3682 * @param uRpl The RPL.
3683 */
3684IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3685{
3686 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3687 * data selector in protected mode. */
3688 pSReg->Sel = uRpl;
3689 pSReg->ValidSel = uRpl;
3690 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3691 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3692 {
3693 /* VT-x (Intel 3960x) was observed doing something like this. */
3694 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3695 pSReg->u32Limit = UINT32_MAX;
3696 pSReg->u64Base = 0;
3697 }
3698 else
3699 {
3700 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3701 pSReg->u32Limit = 0;
3702 pSReg->u64Base = 0;
3703 }
3704}
3705
3706
3707/**
3708 * Loads a segment selector during a task switch in protected mode.
3709 *
3710 * In this task switch scenario, we would throw \#TS exceptions rather than
3711 * \#GPs.
3712 *
3713 * @returns VBox strict status code.
3714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3715 * @param pSReg Pointer to the segment register.
3716 * @param uSel The new selector value.
3717 *
3718 * @remarks This does _not_ handle CS or SS.
3719 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3720 */
3721IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3722{
3723 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3724
3725 /* Null data selector. */
3726 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3727 {
3728 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3729 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3730 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3731 return VINF_SUCCESS;
3732 }
3733
3734 /* Fetch the descriptor. */
3735 IEMSELDESC Desc;
3736 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3737 if (rcStrict != VINF_SUCCESS)
3738 {
3739 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3740 VBOXSTRICTRC_VAL(rcStrict)));
3741 return rcStrict;
3742 }
3743
3744 /* Must be a data segment or readable code segment. */
3745 if ( !Desc.Legacy.Gen.u1DescType
3746 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3747 {
3748 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3749 Desc.Legacy.Gen.u4Type));
3750 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3751 }
3752
3753 /* Check privileges for data segments and non-conforming code segments. */
3754 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3755 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3756 {
3757 /* The RPL and the new CPL must be less than or equal to the DPL. */
3758 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3759 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3760 {
3761 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3762 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3763 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3764 }
3765 }
3766
3767 /* Is it there? */
3768 if (!Desc.Legacy.Gen.u1Present)
3769 {
3770 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3771 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3772 }
3773
3774 /* The base and limit. */
3775 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3776 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3777
3778 /*
3779 * Ok, everything checked out fine. Now set the accessed bit before
3780 * committing the result into the registers.
3781 */
3782 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3783 {
3784 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3788 }
3789
3790 /* Commit */
3791 pSReg->Sel = uSel;
3792 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3793 pSReg->u32Limit = cbLimit;
3794 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3795 pSReg->ValidSel = uSel;
3796 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3797 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3798 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3799
3800 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3801 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3802 return VINF_SUCCESS;
3803}
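/*
 * Standalone sketch (not part of the build) of the privilege rule enforced
 * above for data and non-conforming code segments: both the selector RPL and
 * the current CPL must be numerically less than or equal to the descriptor
 * DPL.  Conforming code segments are exempt.  The helper name is made up.
 */
#if 0
static int iemExampleDataSegPrivOk(unsigned uRpl, unsigned uCpl, unsigned uDpl)
{
    return uRpl <= uDpl && uCpl <= uDpl;
}
#endif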
3804
3805
3806/**
3807 * Performs a task switch.
3808 *
3809 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3810 * caller is responsible for performing the necessary checks (like DPL, TSS
3811 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3812 * reference for JMP, CALL, IRET.
3813 *
3814 * If the task switch is due to a software interrupt or hardware exception,
3815 * the caller is responsible for validating the TSS selector and descriptor. See
3816 * Intel Instruction reference for INT n.
3817 *
3818 * @returns VBox strict status code.
3819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3820 * @param enmTaskSwitch What caused this task switch.
3821 * @param uNextEip The EIP effective after the task switch.
3822 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3823 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3824 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3825 * @param SelTSS The TSS selector of the new task.
3826 * @param pNewDescTSS Pointer to the new TSS descriptor.
3827 */
3828IEM_STATIC VBOXSTRICTRC
3829iemTaskSwitch(PVMCPU pVCpu,
3830 IEMTASKSWITCH enmTaskSwitch,
3831 uint32_t uNextEip,
3832 uint32_t fFlags,
3833 uint16_t uErr,
3834 uint64_t uCr2,
3835 RTSEL SelTSS,
3836 PIEMSELDESC pNewDescTSS)
3837{
3838 Assert(!IEM_IS_REAL_MODE(pVCpu));
3839 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3840 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3841
3842 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3843 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3844 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3845 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3846 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3847
3848 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3849 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3850
3851 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3852 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3853
3854 /* Update CR2 in case it's a page-fault. */
3855 /** @todo This should probably be done much earlier in IEM/PGM. See
3856 * @bugref{5653#c49}. */
3857 if (fFlags & IEM_XCPT_FLAGS_CR2)
3858 pVCpu->cpum.GstCtx.cr2 = uCr2;
3859
3860 /*
3861 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3862 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3863 */
3864 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3865 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3866 if (uNewTSSLimit < uNewTSSLimitMin)
3867 {
3868 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3869 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3870 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3871 }
3872
3873 /*
3874 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3875 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3876 */
3877 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3878 {
3879 uint64_t const uExitInfo1 = SelTSS;
3880 uint64_t uExitInfo2 = uErr; /* 64-bit: the SVM_EXIT2_TASK_SWITCH_XXX bits set below live above bit 31. */
3881 switch (enmTaskSwitch)
3882 {
3883 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3884 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3885 default: break;
3886 }
3887 if (fFlags & IEM_XCPT_FLAGS_ERR)
3888 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3889 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3890 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3891
3892 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3893 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3894 RT_NOREF2(uExitInfo1, uExitInfo2);
3895 }
3896 /** @todo Nested-VMX task-switch intercept. */
3897
3898 /*
3899 * Check the current TSS limit. The last write to the current TSS during the task
3900 * switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3901 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3902 *
3903 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3904 * end up with smaller than "legal" TSS limits.
3905 */
3906 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
3907 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3908 if (uCurTSSLimit < uCurTSSLimitMin)
3909 {
3910 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3911 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3912 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3913 }
3914
3915 /*
3916 * Verify that the new TSS can be accessed and map it. Map only the required contents
3917 * and not the entire TSS.
3918 */
3919 void *pvNewTSS;
3920 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3921 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3922 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3923 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3924 * not perform correct translation if this happens. See Intel spec. 7.2.1
3925 * "Task-State Segment" */
3926 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3927 if (rcStrict != VINF_SUCCESS)
3928 {
3929 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3930 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3931 return rcStrict;
3932 }
3933
3934 /*
3935 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3936 */
3937 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
3938 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3939 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3940 {
3941 PX86DESC pDescCurTSS;
3942 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3943 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3944 if (rcStrict != VINF_SUCCESS)
3945 {
3946 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3947 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3948 return rcStrict;
3949 }
3950
3951 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3952 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3953 if (rcStrict != VINF_SUCCESS)
3954 {
3955 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3956 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3957 return rcStrict;
3958 }
3959
3960 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3961 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3962 {
3963 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3964 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3965 u32EFlags &= ~X86_EFL_NT;
3966 }
3967 }
3968
3969 /*
3970 * Save the CPU state into the current TSS.
3971 */
3972 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
3973 if (GCPtrNewTSS == GCPtrCurTSS)
3974 {
3975 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3976 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3977 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ldtr.Sel));
3978 }
3979 if (fIsNewTSS386)
3980 {
3981 /*
3982 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3983 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3984 */
3985 void *pvCurTSS32;
3986 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3987 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3988 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3989 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3990 if (rcStrict != VINF_SUCCESS)
3991 {
3992 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3993 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3994 return rcStrict;
3995 }
3996
3997 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3998 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3999 pCurTSS32->eip = uNextEip;
4000 pCurTSS32->eflags = u32EFlags;
4001 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4002 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4003 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4004 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4005 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4006 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4007 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4008 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4009 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4010 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4011 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4012 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4013 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4014 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4015
4016 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4017 if (rcStrict != VINF_SUCCESS)
4018 {
4019 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4020 VBOXSTRICTRC_VAL(rcStrict)));
4021 return rcStrict;
4022 }
4023 }
4024 else
4025 {
4026 /*
4027 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4028 */
4029 void *pvCurTSS16;
4030 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4031 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4032 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4033 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4034 if (rcStrict != VINF_SUCCESS)
4035 {
4036 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4037 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4038 return rcStrict;
4039 }
4040
4041 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4042 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4043 pCurTSS16->ip = uNextEip;
4044 pCurTSS16->flags = u32EFlags;
4045 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4046 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4047 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4048 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4049 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4050 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4051 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4052 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4053 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4054 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4055 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4056 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4057
4058 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4059 if (rcStrict != VINF_SUCCESS)
4060 {
4061 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4062 VBOXSTRICTRC_VAL(rcStrict)));
4063 return rcStrict;
4064 }
4065 }
4066
4067 /*
4068 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4069 */
4070 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4071 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4072 {
4073 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4074 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4075 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4076 }
4077
4078 /*
4079 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4080 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4081 */
4082 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4083 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4084 bool fNewDebugTrap;
4085 if (fIsNewTSS386)
4086 {
4087 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4088 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4089 uNewEip = pNewTSS32->eip;
4090 uNewEflags = pNewTSS32->eflags;
4091 uNewEax = pNewTSS32->eax;
4092 uNewEcx = pNewTSS32->ecx;
4093 uNewEdx = pNewTSS32->edx;
4094 uNewEbx = pNewTSS32->ebx;
4095 uNewEsp = pNewTSS32->esp;
4096 uNewEbp = pNewTSS32->ebp;
4097 uNewEsi = pNewTSS32->esi;
4098 uNewEdi = pNewTSS32->edi;
4099 uNewES = pNewTSS32->es;
4100 uNewCS = pNewTSS32->cs;
4101 uNewSS = pNewTSS32->ss;
4102 uNewDS = pNewTSS32->ds;
4103 uNewFS = pNewTSS32->fs;
4104 uNewGS = pNewTSS32->gs;
4105 uNewLdt = pNewTSS32->selLdt;
4106 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4107 }
4108 else
4109 {
4110 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4111 uNewCr3 = 0;
4112 uNewEip = pNewTSS16->ip;
4113 uNewEflags = pNewTSS16->flags;
4114 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4115 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4116 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4117 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4118 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4119 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4120 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4121 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4122 uNewES = pNewTSS16->es;
4123 uNewCS = pNewTSS16->cs;
4124 uNewSS = pNewTSS16->ss;
4125 uNewDS = pNewTSS16->ds;
4126 uNewFS = 0;
4127 uNewGS = 0;
4128 uNewLdt = pNewTSS16->selLdt;
4129 fNewDebugTrap = false;
4130 }
4131
4132 if (GCPtrNewTSS == GCPtrCurTSS)
4133 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4134 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4135
4136 /*
4137 * We're done accessing the new TSS.
4138 */
4139 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4140 if (rcStrict != VINF_SUCCESS)
4141 {
4142 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4143 return rcStrict;
4144 }
4145
4146 /*
4147 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4148 */
4149 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4150 {
4151 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4152 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4153 if (rcStrict != VINF_SUCCESS)
4154 {
4155 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4156 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4157 return rcStrict;
4158 }
4159
4160 /* Check that the descriptor indicates the new TSS is available (not busy). */
4161 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4162 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4163 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4164
4165 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4166 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4167 if (rcStrict != VINF_SUCCESS)
4168 {
4169 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4170 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4171 return rcStrict;
4172 }
4173 }
4174
4175 /*
4176 * From this point on, we're technically in the new task. Exceptions raised here are
4177 * deferred until the task switch completes, but are delivered before any instruction in the new task executes.
4178 */
4179 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4180 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4181 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4182 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4183 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4184 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4185 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4186
4187 /* Set the busy bit in TR. */
4188 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4189 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4190 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4191 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4192 {
4193 uNewEflags |= X86_EFL_NT;
4194 }
4195
4196 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4197 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4198 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4199
4200 pVCpu->cpum.GstCtx.eip = uNewEip;
4201 pVCpu->cpum.GstCtx.eax = uNewEax;
4202 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4203 pVCpu->cpum.GstCtx.edx = uNewEdx;
4204 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4205 pVCpu->cpum.GstCtx.esp = uNewEsp;
4206 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4207 pVCpu->cpum.GstCtx.esi = uNewEsi;
4208 pVCpu->cpum.GstCtx.edi = uNewEdi;
4209
4210 uNewEflags &= X86_EFL_LIVE_MASK;
4211 uNewEflags |= X86_EFL_RA1_MASK;
4212 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4213
4214 /*
4215 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4216 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4217 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
4218 */
4219 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4220 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4221
4222 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4223 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4224
4225 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4226 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4227
4228 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4229 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4230
4231 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4232 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4233
4234 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4235 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4236 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4237
4238 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4239 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4240 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4241 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4242
4243 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4244 {
4245 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4246 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4247 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4248 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4249 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4250 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4251 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4252 }
4253
4254 /*
4255 * Switch CR3 for the new task.
4256 */
4257 if ( fIsNewTSS386
4258 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4259 {
4260 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4261 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4262 AssertRCSuccessReturn(rc, rc);
4263
4264 /* Inform PGM. */
4265 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4266 AssertRCReturn(rc, rc);
4267 /* ignore informational status codes */
4268
4269 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4270 }
4271
4272 /*
4273 * Switch LDTR for the new task.
4274 */
4275 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4276 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4277 else
4278 {
4279 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4280
4281 IEMSELDESC DescNewLdt;
4282 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4283 if (rcStrict != VINF_SUCCESS)
4284 {
4285 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4286 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4287 return rcStrict;
4288 }
4289 if ( !DescNewLdt.Legacy.Gen.u1Present
4290 || DescNewLdt.Legacy.Gen.u1DescType
4291 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4292 {
4293 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4294 uNewLdt, DescNewLdt.Legacy.u));
4295 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4296 }
4297
4298 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4299 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4300 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4301 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4302 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4303 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4304 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4306 }
4307
4308 IEMSELDESC DescSS;
4309 if (IEM_IS_V86_MODE(pVCpu))
4310 {
4311 pVCpu->iem.s.uCpl = 3;
4312 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4313 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4314 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4315 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4316 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4317 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4318
4319 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4320 DescSS.Legacy.u = 0;
4321 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4322 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4323 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4324 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4325 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4326 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4327 DescSS.Legacy.Gen.u2Dpl = 3;
4328 }
4329 else
4330 {
4331 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4332
4333 /*
4334 * Load the stack segment for the new task.
4335 */
4336 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4337 {
4338 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4339 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4340 }
4341
4342 /* Fetch the descriptor. */
4343 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4344 if (rcStrict != VINF_SUCCESS)
4345 {
4346 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4347 VBOXSTRICTRC_VAL(rcStrict)));
4348 return rcStrict;
4349 }
4350
4351 /* SS must be a data segment and writable. */
4352 if ( !DescSS.Legacy.Gen.u1DescType
4353 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4354 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4355 {
4356 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4357 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4358 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4359 }
4360
4361 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4362 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4363 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4364 {
4365 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4366 uNewCpl));
4367 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4368 }
4369
4370 /* Is it there? */
4371 if (!DescSS.Legacy.Gen.u1Present)
4372 {
4373 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4374 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4375 }
4376
4377 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4378 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4379
4380 /* Set the accessed bit before committing the result into SS. */
4381 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4382 {
4383 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4384 if (rcStrict != VINF_SUCCESS)
4385 return rcStrict;
4386 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4387 }
4388
4389 /* Commit SS. */
4390 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4391 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4392 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4393 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4394 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4395 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4396 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4397
4398 /* CPL has changed, update IEM before loading rest of segments. */
4399 pVCpu->iem.s.uCpl = uNewCpl;
4400
4401 /*
4402 * Load the data segments for the new task.
4403 */
4404 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4405 if (rcStrict != VINF_SUCCESS)
4406 return rcStrict;
4407 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4408 if (rcStrict != VINF_SUCCESS)
4409 return rcStrict;
4410 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4411 if (rcStrict != VINF_SUCCESS)
4412 return rcStrict;
4413 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4414 if (rcStrict != VINF_SUCCESS)
4415 return rcStrict;
4416
4417 /*
4418 * Load the code segment for the new task.
4419 */
4420 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4421 {
4422 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4423 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4424 }
4425
4426 /* Fetch the descriptor. */
4427 IEMSELDESC DescCS;
4428 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4429 if (rcStrict != VINF_SUCCESS)
4430 {
4431 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4432 return rcStrict;
4433 }
4434
4435 /* CS must be a code segment. */
4436 if ( !DescCS.Legacy.Gen.u1DescType
4437 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4438 {
4439 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4440 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4441 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4442 }
4443
4444 /* For conforming CS, DPL must be less than or equal to the RPL. */
4445 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4446 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4447 {
4448 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4449 DescCS.Legacy.Gen.u2Dpl));
4450 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4451 }
4452
4453 /* For non-conforming CS, DPL must match RPL. */
4454 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4455 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4456 {
4457 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4458 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4459 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4460 }
4461
4462 /* Is it there? */
4463 if (!DescCS.Legacy.Gen.u1Present)
4464 {
4465 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4466 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4467 }
4468
4469 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4470 u64Base = X86DESC_BASE(&DescCS.Legacy);
4471
4472 /* Set the accessed bit before committing the result into CS. */
4473 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4474 {
4475 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4476 if (rcStrict != VINF_SUCCESS)
4477 return rcStrict;
4478 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4479 }
4480
4481 /* Commit CS. */
4482 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4483 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4484 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4485 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4486 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4487 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4488 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4489 }
4490
4491 /** @todo Debug trap. */
4492 if (fIsNewTSS386 && fNewDebugTrap)
4493 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4494
4495 /*
4496 * Construct the error code masks based on what caused this task switch.
4497 * See Intel Instruction reference for INT.
4498 */
4499 uint16_t uExt;
4500 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4501 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4502 {
4503 uExt = 1;
4504 }
4505 else
4506 uExt = 0;
4507
4508 /*
4509 * Push any error code on to the new stack.
4510 */
4511 if (fFlags & IEM_XCPT_FLAGS_ERR)
4512 {
4513 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4514 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4515 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4516
4517 /* Check that there is sufficient space on the stack. */
4518 /** @todo Factor out segment limit checking for normal/expand down segments
4519 * into a separate function. */
4520 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4521 {
4522 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4523 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4524 {
4525 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4526 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4527 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4528 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4529 }
4530 }
4531 else
4532 {
4533 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4534 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4535 {
4536 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4537 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4538 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4539 }
4540 }
4541
4542
4543 if (fIsNewTSS386)
4544 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4545 else
4546 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4547 if (rcStrict != VINF_SUCCESS)
4548 {
4549 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4550 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4551 return rcStrict;
4552 }
4553 }
4554
4555 /* Check the new EIP against the new CS limit. */
4556 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4557 {
4558 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4559 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4560 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4561 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4562 }
4563
4564 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.ss.Sel));
4565 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4566}
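/*
 * Standalone sketch (not part of the build) of the TSS descriptor busy-bit
 * handling performed above: bit 1 of the type nibble distinguishes available
 * from busy TSS descriptors (1/3 for 286, 9/11 for 386).  It is cleared in the
 * old descriptor on JMP/IRET and set in the new one unless the switch is an
 * IRET.  Helper names are made up; 0x2 matches X86_SEL_TYPE_SYS_TSS_BUSY_MASK.
 */
#if 0
static unsigned iemExampleTssTypeSetBusy(unsigned u4Type)   { return u4Type | 0x2; }
static unsigned iemExampleTssTypeClearBusy(unsigned u4Type) { return u4Type & ~0x2u; }
#endif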
4567
4568
4569/**
4570 * Implements exceptions and interrupts for protected mode.
4571 *
4572 * @returns VBox strict status code.
4573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4574 * @param cbInstr The number of bytes to offset rIP by in the return
4575 * address.
4576 * @param u8Vector The interrupt / exception vector number.
4577 * @param fFlags The flags.
4578 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4579 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4580 */
4581IEM_STATIC VBOXSTRICTRC
4582iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4583 uint8_t cbInstr,
4584 uint8_t u8Vector,
4585 uint32_t fFlags,
4586 uint16_t uErr,
4587 uint64_t uCr2)
4588{
4589 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4590
4591 /*
4592 * Read the IDT entry.
4593 */
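    /* Note: each protected-mode IDT entry is an 8-byte gate descriptor, so the
       limit check below requires idtr.cbIdt to cover bytes u8Vector*8 through
       u8Vector*8 + 7 of the table. */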
4594 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4595 {
4596 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4597 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4598 }
4599 X86DESC Idte;
4600 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4601 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4602 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4603 {
4604 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4605 return rcStrict;
4606 }
4607 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4608 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4609 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4610
4611 /*
4612 * Check the descriptor type, DPL and such.
4613 * ASSUMES this is done in the same order as described for call-gate calls.
4614 */
4615 if (Idte.Gate.u1DescType)
4616 {
4617 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4618 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4619 }
4620 bool fTaskGate = false;
4621 uint8_t f32BitGate = true;
4622 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4623 switch (Idte.Gate.u4Type)
4624 {
4625 case X86_SEL_TYPE_SYS_UNDEFINED:
4626 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4627 case X86_SEL_TYPE_SYS_LDT:
4628 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4629 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4630 case X86_SEL_TYPE_SYS_UNDEFINED2:
4631 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4632 case X86_SEL_TYPE_SYS_UNDEFINED3:
4633 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4634 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4635 case X86_SEL_TYPE_SYS_UNDEFINED4:
4636 {
4637 /** @todo check what actually happens when the type is wrong...
4638 * esp. call gates. */
4639 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4640 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4641 }
4642
4643 case X86_SEL_TYPE_SYS_286_INT_GATE:
4644 f32BitGate = false;
4645 RT_FALL_THRU();
4646 case X86_SEL_TYPE_SYS_386_INT_GATE:
4647 fEflToClear |= X86_EFL_IF;
4648 break;
4649
4650 case X86_SEL_TYPE_SYS_TASK_GATE:
4651 fTaskGate = true;
4652#ifndef IEM_IMPLEMENTS_TASKSWITCH
4653 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4654#endif
4655 break;
4656
4657 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4658             f32BitGate = false;
                 RT_FALL_THRU();
4659 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4660 break;
4661
4662 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4663 }
4664
4665 /* Check DPL against CPL if applicable. */
4666 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4667 {
4668 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4669 {
4670 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4671 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4672 }
4673 }
4674
4675 /* Is it there? */
4676 if (!Idte.Gate.u1Present)
4677 {
4678 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4679 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4680 }
4681
4682 /* Is it a task-gate? */
4683 if (fTaskGate)
4684 {
4685 /*
4686 * Construct the error code masks based on what caused this task switch.
4687 * See Intel Instruction reference for INT.
4688 */
4689 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4690 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4691 RTSEL SelTSS = Idte.Gate.u16Sel;
4692
4693 /*
4694 * Fetch the TSS descriptor in the GDT.
4695 */
4696 IEMSELDESC DescTSS;
4697 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4698 if (rcStrict != VINF_SUCCESS)
4699 {
4700 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4701 VBOXSTRICTRC_VAL(rcStrict)));
4702 return rcStrict;
4703 }
4704
4705 /* The TSS descriptor must be a system segment and be available (not busy). */
4706 if ( DescTSS.Legacy.Gen.u1DescType
4707 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4708 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4709 {
4710 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4711 u8Vector, SelTSS, DescTSS.Legacy.au64));
4712 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4713 }
4714
4715 /* The TSS must be present. */
4716 if (!DescTSS.Legacy.Gen.u1Present)
4717 {
4718 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4719 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4720 }
4721
4722 /* Do the actual task switch. */
4723 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT, pVCpu->cpum.GstCtx.eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4724 }
4725
4726 /* A null CS is bad. */
4727 RTSEL NewCS = Idte.Gate.u16Sel;
4728 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4729 {
4730 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4731 return iemRaiseGeneralProtectionFault0(pVCpu);
4732 }
4733
4734 /* Fetch the descriptor for the new CS. */
4735 IEMSELDESC DescCS;
4736 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4737 if (rcStrict != VINF_SUCCESS)
4738 {
4739 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4740 return rcStrict;
4741 }
4742
4743 /* Must be a code segment. */
4744 if (!DescCS.Legacy.Gen.u1DescType)
4745 {
4746 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4747 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4748 }
4749 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4750 {
4751 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4752 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4753 }
4754
4755 /* Don't allow lowering the privilege level. */
4756 /** @todo Does the lowering of privileges apply to software interrupts
4757 * only? This has bearings on the more-privileged or
4758 * same-privilege stack behavior further down. A testcase would
4759 * be nice. */
4760 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4761 {
4762 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4763 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4764 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4765 }
4766
4767 /* Make sure the selector is present. */
4768 if (!DescCS.Legacy.Gen.u1Present)
4769 {
4770 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4771 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4772 }
4773
4774 /* Check the new EIP against the new CS limit. */
4775 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4776 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4777 ? Idte.Gate.u16OffsetLow
4778 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4779 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4780 if (uNewEip > cbLimitCS)
4781 {
4782 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4783 u8Vector, uNewEip, cbLimitCS, NewCS));
4784 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4785 }
4786 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4787
4788 /* Calc the flag image to push. */
4789 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4790 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4791 fEfl &= ~X86_EFL_RF;
4792 else
4793 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4794
4795 /* From V8086 mode only go to CPL 0. */
4796 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4797 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4798 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4799 {
4800 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4801 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4802 }
4803
4804 /*
4805 * If the privilege level changes, we need to get a new stack from the TSS.
4806 * This in turns means validating the new SS and ESP...
4807 */
4808 if (uNewCpl != pVCpu->iem.s.uCpl)
4809 {
4810 RTSEL NewSS;
4811 uint32_t uNewEsp;
4812 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4813 if (rcStrict != VINF_SUCCESS)
4814 return rcStrict;
4815
4816 IEMSELDESC DescSS;
4817 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4818 if (rcStrict != VINF_SUCCESS)
4819 return rcStrict;
4820 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4821 if (!DescSS.Legacy.Gen.u1DefBig)
4822 {
4823 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4824 uNewEsp = (uint16_t)uNewEsp;
4825 }
4826
4827 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4828
4829 /* Check that there is sufficient space for the stack frame. */
4830 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4831 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4832 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4833 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
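        /* For reference, the frame written below is, lowest address first:
               ring change:      [uErr] EIP CS EFLAGS ESP SS
               from V8086 mode:  [uErr] EIP CS EFLAGS ESP SS ES DS FS GS
           with 2-byte entries for a 16-bit gate and 4-byte entries for a 32-bit
           gate, i.e. the 10/12 (18/20 for V8086) bytes doubled by f32BitGate. */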
4834
4835 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4836 {
4837 if ( uNewEsp - 1 > cbLimitSS
4838 || uNewEsp < cbStackFrame)
4839 {
4840 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4841 u8Vector, NewSS, uNewEsp, cbStackFrame));
4842 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4843 }
4844 }
4845 else
4846 {
4847 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4848 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4849 {
4850 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4851 u8Vector, NewSS, uNewEsp, cbStackFrame));
4852 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4853 }
4854 }
4855
4856 /*
4857 * Start making changes.
4858 */
4859
4860 /* Set the new CPL so that stack accesses use it. */
4861 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4862 pVCpu->iem.s.uCpl = uNewCpl;
4863
4864 /* Create the stack frame. */
4865 RTPTRUNION uStackFrame;
4866 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4867 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4868 if (rcStrict != VINF_SUCCESS)
4869 return rcStrict;
4870 void * const pvStackFrame = uStackFrame.pv;
4871 if (f32BitGate)
4872 {
4873 if (fFlags & IEM_XCPT_FLAGS_ERR)
4874 *uStackFrame.pu32++ = uErr;
4875 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4876 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4877 uStackFrame.pu32[2] = fEfl;
4878 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4879 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4880 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4881 if (fEfl & X86_EFL_VM)
4882 {
4883 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4884 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4885 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4886 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4887 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4888 }
4889 }
4890 else
4891 {
4892 if (fFlags & IEM_XCPT_FLAGS_ERR)
4893 *uStackFrame.pu16++ = uErr;
4894 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4895 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4896 uStackFrame.pu16[2] = fEfl;
4897 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4898 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4899 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4900 if (fEfl & X86_EFL_VM)
4901 {
4902 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
4903 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
4904 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
4905 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
4906 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
4907 }
4908 }
4909 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4910 if (rcStrict != VINF_SUCCESS)
4911 return rcStrict;
4912
4913 /* Mark the selectors 'accessed' (hope this is the correct time). */
4914     /** @todo testcase: exactly _when_ are the accessed bits set - before or
4915 * after pushing the stack frame? (Write protect the gdt + stack to
4916 * find out.) */
4917 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4918 {
4919 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4920 if (rcStrict != VINF_SUCCESS)
4921 return rcStrict;
4922 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4923 }
4924
4925 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4926 {
4927 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4928 if (rcStrict != VINF_SUCCESS)
4929 return rcStrict;
4930 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4931 }
4932
4933 /*
4934          * Start committing the register changes (joins with the DPL=CPL branch).
4935 */
4936 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
4937 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
4938 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4939 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
4940 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4941 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4942 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4943 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4944 * SP is loaded).
4945 * Need to check the other combinations too:
4946 * - 16-bit TSS, 32-bit handler
4947 * - 32-bit TSS, 16-bit handler */
4948 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
4949 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
4950 else
4951 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
4952
4953 if (fEfl & X86_EFL_VM)
4954 {
4955 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
4956 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
4957 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
4958 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
4959 }
4960 }
4961 /*
4962 * Same privilege, no stack change and smaller stack frame.
4963 */
4964 else
4965 {
4966 uint64_t uNewRsp;
4967 RTPTRUNION uStackFrame;
4968 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
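        /* Same-privilege frame, lowest address first: [uErr] EIP CS EFLAGS,
           i.e. 3 or 4 entries of 2 or 4 bytes depending on the gate size. */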
4969 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4970 if (rcStrict != VINF_SUCCESS)
4971 return rcStrict;
4972 void * const pvStackFrame = uStackFrame.pv;
4973
4974 if (f32BitGate)
4975 {
4976 if (fFlags & IEM_XCPT_FLAGS_ERR)
4977 *uStackFrame.pu32++ = uErr;
4978 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4979 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4980 uStackFrame.pu32[2] = fEfl;
4981 }
4982 else
4983 {
4984 if (fFlags & IEM_XCPT_FLAGS_ERR)
4985 *uStackFrame.pu16++ = uErr;
4986 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4987 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4988 uStackFrame.pu16[2] = fEfl;
4989 }
4990 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4991 if (rcStrict != VINF_SUCCESS)
4992 return rcStrict;
4993
4994 /* Mark the CS selector as 'accessed'. */
4995 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4996 {
4997 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4998 if (rcStrict != VINF_SUCCESS)
4999 return rcStrict;
5000 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5001 }
5002
5003 /*
5004 * Start committing the register changes (joins with the other branch).
5005 */
5006 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5007 }
5008
5009 /* ... register committing continues. */
5010 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5011 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5012 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5013 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5014 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5015 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5016
5017 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5018 fEfl &= ~fEflToClear;
5019 IEMMISC_SET_EFL(pVCpu, fEfl);
5020
5021 if (fFlags & IEM_XCPT_FLAGS_CR2)
5022 pVCpu->cpum.GstCtx.cr2 = uCr2;
5023
5024 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5025 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5026
5027 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5028}
5029
5030
5031/**
5032 * Implements exceptions and interrupts for long mode.
5033 *
5034 * @returns VBox strict status code.
5035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5036 * @param cbInstr The number of bytes to offset rIP by in the return
5037 * address.
5038 * @param u8Vector The interrupt / exception vector number.
5039 * @param fFlags The flags.
5040 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5041 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5042 */
5043IEM_STATIC VBOXSTRICTRC
5044iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5045 uint8_t cbInstr,
5046 uint8_t u8Vector,
5047 uint32_t fFlags,
5048 uint16_t uErr,
5049 uint64_t uCr2)
5050{
5051 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5052
5053 /*
5054 * Read the IDT entry.
5055 */
5056 uint16_t offIdt = (uint16_t)u8Vector << 4;
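    /* Note: in long mode each IDT entry is a 16-byte gate descriptor (the
       second qword holds bits 63:32 of the offset), hence the vector is
       scaled by 16 and the entry is fetched as two qwords below. */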
5057 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5058 {
5059 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5060 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5061 }
5062 X86DESC64 Idte;
5063 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5064 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5065 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5066 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5067 {
5068 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5069 return rcStrict;
5070 }
5071 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5072 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5073 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5074
5075 /*
5076 * Check the descriptor type, DPL and such.
5077 * ASSUMES this is done in the same order as described for call-gate calls.
5078 */
5079 if (Idte.Gate.u1DescType)
5080 {
5081 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5082 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5083 }
5084 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5085 switch (Idte.Gate.u4Type)
5086 {
5087 case AMD64_SEL_TYPE_SYS_INT_GATE:
5088 fEflToClear |= X86_EFL_IF;
5089 break;
5090 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5091 break;
5092
5093 default:
5094 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5095 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5096 }
5097
5098 /* Check DPL against CPL if applicable. */
5099 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5100 {
5101 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5102 {
5103 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5104 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5105 }
5106 }
5107
5108 /* Is it there? */
5109 if (!Idte.Gate.u1Present)
5110 {
5111 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5112 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5113 }
5114
5115 /* A null CS is bad. */
5116 RTSEL NewCS = Idte.Gate.u16Sel;
5117 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5118 {
5119 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5120 return iemRaiseGeneralProtectionFault0(pVCpu);
5121 }
5122
5123 /* Fetch the descriptor for the new CS. */
5124 IEMSELDESC DescCS;
5125 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5126 if (rcStrict != VINF_SUCCESS)
5127 {
5128 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5129 return rcStrict;
5130 }
5131
5132 /* Must be a 64-bit code segment. */
5133 if (!DescCS.Long.Gen.u1DescType)
5134 {
5135 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5136 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5137 }
5138 if ( !DescCS.Long.Gen.u1Long
5139 || DescCS.Long.Gen.u1DefBig
5140 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5141 {
5142 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5143 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5144 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5145 }
5146
5147 /* Don't allow lowering the privilege level. For non-conforming CS
5148 selectors, the CS.DPL sets the privilege level the trap/interrupt
5149 handler runs at. For conforming CS selectors, the CPL remains
5150 unchanged, but the CS.DPL must be <= CPL. */
5151 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5152 * when CPU in Ring-0. Result \#GP? */
5153 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5154 {
5155 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5156 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5157 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5158 }
5159
5160
5161 /* Make sure the selector is present. */
5162 if (!DescCS.Legacy.Gen.u1Present)
5163 {
5164 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5165 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5166 }
5167
5168 /* Check that the new RIP is canonical. */
5169 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5170 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5171 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5172 if (!IEM_IS_CANONICAL(uNewRip))
5173 {
5174 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5175 return iemRaiseGeneralProtectionFault0(pVCpu);
5176 }
5177
5178 /*
5179 * If the privilege level changes or if the IST isn't zero, we need to get
5180 * a new stack from the TSS.
5181 */
5182 uint64_t uNewRsp;
5183 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5184 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5185 if ( uNewCpl != pVCpu->iem.s.uCpl
5186 || Idte.Gate.u3IST != 0)
5187 {
5188 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5189 if (rcStrict != VINF_SUCCESS)
5190 return rcStrict;
5191 }
5192 else
5193 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5194 uNewRsp &= ~(uint64_t)0xf;
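    /* In 64-bit mode the CPU aligns the stack pointer down to a 16-byte
       boundary before pushing the interrupt frame, whether the stack comes
       from the TSS/IST or is the current one. */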
5195
5196 /*
5197 * Calc the flag image to push.
5198 */
5199 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5200 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5201 fEfl &= ~X86_EFL_RF;
5202 else
5203 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5204
5205 /*
5206 * Start making changes.
5207 */
5208 /* Set the new CPL so that stack accesses use it. */
5209 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5210 pVCpu->iem.s.uCpl = uNewCpl;
5211
5212 /* Create the stack frame. */
5213 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
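    /* The 64-bit frame is always five qwords - RIP, CS, RFLAGS, RSP and SS -
       plus an optional error code qword; SS:RSP is pushed unconditionally in
       long mode even when the privilege level does not change. */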
5214 RTPTRUNION uStackFrame;
5215 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5216 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5217 if (rcStrict != VINF_SUCCESS)
5218 return rcStrict;
5219 void * const pvStackFrame = uStackFrame.pv;
5220
5221 if (fFlags & IEM_XCPT_FLAGS_ERR)
5222 *uStackFrame.pu64++ = uErr;
5223 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5224 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5225 uStackFrame.pu64[2] = fEfl;
5226 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5227 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5228 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5229 if (rcStrict != VINF_SUCCESS)
5230 return rcStrict;
5231
5232 /* Mark the CS selectors 'accessed' (hope this is the correct time). */
5233     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5234 * after pushing the stack frame? (Write protect the gdt + stack to
5235 * find out.) */
5236 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5237 {
5238 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5239 if (rcStrict != VINF_SUCCESS)
5240 return rcStrict;
5241 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5242 }
5243
5244 /*
5245      * Start committing the register changes.
5246 */
5247 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5248 * hidden registers when interrupting 32-bit or 16-bit code! */
5249 if (uNewCpl != uOldCpl)
5250 {
5251 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5252 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5253 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5254 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5255 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5256 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5257 }
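    /* On a long-mode ring change SS is architecturally loaded with a NULL
       selector whose RPL equals the new CPL; the hidden parts are filled in
       here so the rest of IEM sees a valid-but-unusable register. */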
5258 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5259 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5260 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5261 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5262 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5263 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5264 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5265 pVCpu->cpum.GstCtx.rip = uNewRip;
5266
5267 fEfl &= ~fEflToClear;
5268 IEMMISC_SET_EFL(pVCpu, fEfl);
5269
5270 if (fFlags & IEM_XCPT_FLAGS_CR2)
5271 pVCpu->cpum.GstCtx.cr2 = uCr2;
5272
5273 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5274 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5275
5276 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5277}
5278
5279
5280/**
5281 * Implements exceptions and interrupts.
5282 *
5283  * All exceptions and interrupts go through this function!
5284 *
5285 * @returns VBox strict status code.
5286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5287 * @param cbInstr The number of bytes to offset rIP by in the return
5288 * address.
5289 * @param u8Vector The interrupt / exception vector number.
5290 * @param fFlags The flags.
5291 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5292 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5293 */
5294DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5295iemRaiseXcptOrInt(PVMCPU pVCpu,
5296 uint8_t cbInstr,
5297 uint8_t u8Vector,
5298 uint32_t fFlags,
5299 uint16_t uErr,
5300 uint64_t uCr2)
5301{
5302 /*
5303 * Get all the state that we might need here.
5304 */
5305#ifdef IN_RING0
5306 int rc = HMR0EnsureCompleteBasicContext(pVCpu, IEM_GET_CTX(pVCpu));
5307 AssertRCReturn(rc, rc);
5308#endif
5309 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5310 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5311
5312#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5313 /*
5314 * Flush prefetch buffer
5315 */
5316 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5317#endif
5318
5319 /*
5320 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5321 */
5322 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5323 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5324 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5325 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5326 {
5327 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5328 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5329 u8Vector = X86_XCPT_GP;
5330 uErr = 0;
5331 }
5332#ifdef DBGFTRACE_ENABLED
5333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5334 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5335 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5336#endif
5337
5338#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5339 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5340 {
5341 /*
5342 * If the event is being injected as part of VMRUN, it isn't subject to event
5343 * intercepts in the nested-guest. However, secondary exceptions that occur
5344 * during injection of any event -are- subject to exception intercepts.
5345 * See AMD spec. 15.20 "Event Injection".
5346 */
5347 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5348 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = 1;
5349 else
5350 {
5351 /*
5352 * Check and handle if the event being raised is intercepted.
5353 */
5354 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5355 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5356 return rcStrict0;
5357 }
5358 }
5359#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5360
5361 /*
5362 * Do recursion accounting.
5363 */
5364 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5365 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5366 if (pVCpu->iem.s.cXcptRecursions == 0)
5367 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5368 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5369 else
5370 {
5371 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5372 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5373 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5374
5375 if (pVCpu->iem.s.cXcptRecursions >= 3)
5376 {
5377#ifdef DEBUG_bird
5378 AssertFailed();
5379#endif
5380 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5381 }
5382
5383 /*
5384 * Evaluate the sequence of recurring events.
5385 */
5386 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5387 NULL /* pXcptRaiseInfo */);
5388 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5389 { /* likely */ }
5390 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5391 {
5392 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5393 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5394 u8Vector = X86_XCPT_DF;
5395 uErr = 0;
5396 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5397 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5398 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5399 }
5400 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5401 {
5402 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5403 return iemInitiateCpuShutdown(pVCpu);
5404 }
5405 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5406 {
5407 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5408 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5409 if (!CPUMIsGuestInNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5410 return VERR_EM_GUEST_CPU_HANG;
5411 }
5412 else
5413 {
5414 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5415 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5416 return VERR_IEM_IPE_9;
5417 }
5418
5419 /*
5420          * The 'EXT' bit is set when an exception occurs during delivery of an external
5421          * event (such as an interrupt or an earlier exception)[1]. The privileged software
5422          * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5423          * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5424 *
5425 * [1] - Intel spec. 6.13 "Error Code"
5426 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5427 * [3] - Intel Instruction reference for INT n.
5428 */
5429 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5430 && (fFlags & IEM_XCPT_FLAGS_ERR)
5431 && u8Vector != X86_XCPT_PF
5432 && u8Vector != X86_XCPT_DF)
5433 {
5434 uErr |= X86_TRAP_ERR_EXTERNAL;
5435 }
5436 }
5437
5438 pVCpu->iem.s.cXcptRecursions++;
5439 pVCpu->iem.s.uCurXcpt = u8Vector;
5440 pVCpu->iem.s.fCurXcpt = fFlags;
5441 pVCpu->iem.s.uCurXcptErr = uErr;
5442 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5443
5444 /*
5445 * Extensive logging.
5446 */
5447#if defined(LOG_ENABLED) && defined(IN_RING3)
5448 if (LogIs3Enabled())
5449 {
5450 PVM pVM = pVCpu->CTX_SUFF(pVM);
5451 char szRegs[4096];
5452 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5453 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5454 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5455 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5456 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5457 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5458 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5459 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5460 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5461 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5462 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5463 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5464 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5465 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5466 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5467 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5468 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5469 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5470 " efer=%016VR{efer}\n"
5471 " pat=%016VR{pat}\n"
5472 " sf_mask=%016VR{sf_mask}\n"
5473 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5474 " lstar=%016VR{lstar}\n"
5475 " star=%016VR{star} cstar=%016VR{cstar}\n"
5476 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5477 );
5478
5479 char szInstr[256];
5480 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5481 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5482 szInstr, sizeof(szInstr), NULL);
5483 Log3(("%s%s\n", szRegs, szInstr));
5484 }
5485#endif /* LOG_ENABLED */
5486
5487 /*
5488 * Call the mode specific worker function.
5489 */
5490 VBOXSTRICTRC rcStrict;
5491 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5492 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5493 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5494 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5495 else
5496 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5497
5498 /* Flush the prefetch buffer. */
5499#ifdef IEM_WITH_CODE_TLB
5500 pVCpu->iem.s.pbInstrBuf = NULL;
5501#else
5502 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5503#endif
5504
5505 /*
5506 * Unwind.
5507 */
5508 pVCpu->iem.s.cXcptRecursions--;
5509 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5510 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5511 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5512 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5513 pVCpu->iem.s.cXcptRecursions + 1));
5514 return rcStrict;
5515}
5516
5517#ifdef IEM_WITH_SETJMP
5518/**
5519 * See iemRaiseXcptOrInt. Will not return.
5520 */
5521IEM_STATIC DECL_NO_RETURN(void)
5522iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5523 uint8_t cbInstr,
5524 uint8_t u8Vector,
5525 uint32_t fFlags,
5526 uint16_t uErr,
5527 uint64_t uCr2)
5528{
5529 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5530 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5531}
5532#endif
5533
5534
5535/** \#DE - 00. */
5536DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5537{
5538 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5539}
5540
5541
5542/** \#DB - 01.
5543  * @note This automatically clears DR7.GD. */
5544DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5545{
5546 /** @todo set/clear RF. */
5547 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5548 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5549}
5550
5551
5552/** \#BR - 05. */
5553DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5554{
5555 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5556}
5557
5558
5559/** \#UD - 06. */
5560DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5561{
5562 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5563}
5564
5565
5566/** \#NM - 07. */
5567DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5568{
5569 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5570}
5571
5572
5573/** \#TS(err) - 0a. */
5574DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5575{
5576 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5577}
5578
5579
5580/** \#TS(tr) - 0a. */
5581DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5582{
5583 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5584 pVCpu->cpum.GstCtx.tr.Sel, 0);
5585}
5586
5587
5588/** \#TS(0) - 0a. */
5589DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5590{
5591 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5592 0, 0);
5593}
5594
5595
5596/** \#TS(err) - 0a. */
5597DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5598{
5599 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5600 uSel & X86_SEL_MASK_OFF_RPL, 0);
5601}
5602
5603
5604/** \#NP(err) - 0b. */
5605DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5606{
5607 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5608}
5609
5610
5611/** \#NP(sel) - 0b. */
5612DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5613{
5614 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5615 uSel & ~X86_SEL_RPL, 0);
5616}
5617
5618
5619/** \#SS(seg) - 0c. */
5620DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5621{
5622 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5623 uSel & ~X86_SEL_RPL, 0);
5624}
5625
5626
5627/** \#SS(err) - 0c. */
5628DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5629{
5630 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5631}
5632
5633
5634/** \#GP(n) - 0d. */
5635DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5636{
5637 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5638}
5639
5640
5641/** \#GP(0) - 0d. */
5642DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5643{
5644 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5645}
5646
5647#ifdef IEM_WITH_SETJMP
5648/** \#GP(0) - 0d. */
5649DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5650{
5651 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5652}
5653#endif
5654
5655
5656/** \#GP(sel) - 0d. */
5657DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5658{
5659 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5660 Sel & ~X86_SEL_RPL, 0);
5661}
5662
5663
5664/** \#GP(0) - 0d. */
5665DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5666{
5667 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5668}
5669
5670
5671/** \#GP(sel) - 0d. */
5672DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5673{
5674 NOREF(iSegReg); NOREF(fAccess);
5675 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5676 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5677}
5678
5679#ifdef IEM_WITH_SETJMP
5680/** \#GP(sel) - 0d, longjmp. */
5681DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5682{
5683 NOREF(iSegReg); NOREF(fAccess);
5684 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5685 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5686}
5687#endif
5688
5689/** \#GP(sel) - 0d. */
5690DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5691{
5692 NOREF(Sel);
5693 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5694}
5695
5696#ifdef IEM_WITH_SETJMP
5697/** \#GP(sel) - 0d, longjmp. */
5698DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5699{
5700 NOREF(Sel);
5701 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5702}
5703#endif
5704
5705
5706/** \#GP(sel) - 0d. */
5707DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5708{
5709 NOREF(iSegReg); NOREF(fAccess);
5710 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5711}
5712
5713#ifdef IEM_WITH_SETJMP
5714/** \#GP(sel) - 0d, longjmp. */
5715DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5716 uint32_t fAccess)
5717{
5718 NOREF(iSegReg); NOREF(fAccess);
5719 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5720}
5721#endif
5722
5723
5724/** \#PF(n) - 0e. */
5725DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5726{
5727 uint16_t uErr;
5728 switch (rc)
5729 {
5730 case VERR_PAGE_NOT_PRESENT:
5731 case VERR_PAGE_TABLE_NOT_PRESENT:
5732 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5733 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5734 uErr = 0;
5735 break;
5736
5737 default:
5738 AssertMsgFailed(("%Rrc\n", rc));
5739 RT_FALL_THRU();
5740 case VERR_ACCESS_DENIED:
5741 uErr = X86_TRAP_PF_P;
5742 break;
5743
5744 /** @todo reserved */
5745 }
5746
5747 if (pVCpu->iem.s.uCpl == 3)
5748 uErr |= X86_TRAP_PF_US;
5749
5750 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5751 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5752 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5753 uErr |= X86_TRAP_PF_ID;
5754
5755#if 0 /* This is so much non-sense, really. Why was it done like that? */
5756     /* Note! RW access callers reporting a WRITE protection fault will clear
5757 the READ flag before calling. So, read-modify-write accesses (RW)
5758 can safely be reported as READ faults. */
5759 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5760 uErr |= X86_TRAP_PF_RW;
5761#else
5762 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5763 {
5764 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5765 uErr |= X86_TRAP_PF_RW;
5766 }
5767#endif
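    /* At this point uErr holds the architectural #PF error code bits assembled
       above: X86_TRAP_PF_P (protection violation vs. not-present page),
       X86_TRAP_PF_RW (write access), X86_TRAP_PF_US (CPL 3 access) and
       X86_TRAP_PF_ID (instruction fetch with PAE+NXE enabled). */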
5768
5769 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5770 uErr, GCPtrWhere);
5771}
5772
5773#ifdef IEM_WITH_SETJMP
5774/** \#PF(n) - 0e, longjmp. */
5775IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5776{
5777 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5778}
5779#endif
5780
5781
5782/** \#MF(0) - 10. */
5783DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5784{
5785 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5786}
5787
5788
5789/** \#AC(0) - 11. */
5790DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5791{
5792 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5793}
5794
5795
5796/**
5797 * Macro for calling iemCImplRaiseDivideError().
5798 *
5799 * This enables us to add/remove arguments and force different levels of
5800 * inlining as we wish.
5801 *
5802 * @return Strict VBox status code.
5803 */
5804#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5805IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5806{
5807 NOREF(cbInstr);
5808 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5809}
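/*
 * Usage sketch (illustrative, with a made-up guard condition): a decoder
 * function that must raise \#DE simply returns the macro, e.g.
 *
 *     if (RT_UNLIKELY(fNeedsDivideError))
 *         return IEMOP_RAISE_DIVIDE_ERROR();
 *
 * IEM_MC_DEFER_TO_CIMPL_0 routes this to iemCImplRaiseDivideError above,
 * which in turn raises X86_XCPT_DE via iemRaiseXcptOrInt.
 */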
5810
5811
5812/**
5813 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5814 *
5815 * This enables us to add/remove arguments and force different levels of
5816 * inlining as we wish.
5817 *
5818 * @return Strict VBox status code.
5819 */
5820#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5821IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5822{
5823 NOREF(cbInstr);
5824 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5825}
5826
5827
5828/**
5829 * Macro for calling iemCImplRaiseInvalidOpcode().
5830 *
5831 * This enables us to add/remove arguments and force different levels of
5832 * inlining as we wish.
5833 *
5834 * @return Strict VBox status code.
5835 */
5836#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5837IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5838{
5839 NOREF(cbInstr);
5840 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5841}
5842
5843
5844/** @} */
5845
5846
5847/*
5848 *
5849  * Helper routines.
5850  * Helper routines.
5851  * Helper routines.
5852 *
5853 */
5854
5855/**
5856 * Recalculates the effective operand size.
5857 *
5858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5859 */
5860IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5861{
5862 switch (pVCpu->iem.s.enmCpuMode)
5863 {
5864 case IEMMODE_16BIT:
5865 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5866 break;
5867 case IEMMODE_32BIT:
5868 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5869 break;
5870 case IEMMODE_64BIT:
5871 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5872 {
5873 case 0:
5874 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5875 break;
5876 case IEM_OP_PRF_SIZE_OP:
5877 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5878 break;
5879 case IEM_OP_PRF_SIZE_REX_W:
5880 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5881 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5882 break;
5883 }
5884 break;
5885 default:
5886 AssertFailed();
5887 }
5888}
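/*
 * For reference, the resolution implemented above:
 *   - 16-bit code: a 0x66 prefix selects 32-bit operands, otherwise 16-bit.
 *   - 32-bit code: a 0x66 prefix selects 16-bit operands, otherwise 32-bit.
 *   - 64-bit code: REX.W forces 64-bit and takes precedence over 0x66; 0x66
 *     alone gives 16-bit; with neither prefix the default operand size is used.
 */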
5889
5890
5891/**
5892 * Sets the default operand size to 64-bit and recalculates the effective
5893 * operand size.
5894 *
5895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5896 */
5897IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5898{
5899 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5900 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5901 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5902 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5903 else
5904 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5905}
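/*
 * Note: this variant is for instructions whose operand size defaults to
 * 64-bit in long mode (e.g. near branches and stack operations); with such a
 * default only a lone 0x66 prefix drops the size to 16-bit, while REX.W (or
 * REX.W together with 0x66) keeps it at 64-bit, matching the check above.
 */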
5906
5907
5908/*
5909 *
5910 * Common opcode decoders.
5911 * Common opcode decoders.
5912 * Common opcode decoders.
5913 *
5914 */
5915//#include <iprt/mem.h>
5916
5917/**
5918 * Used to add extra details about a stub case.
5919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5920 */
5921IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5922{
5923#if defined(LOG_ENABLED) && defined(IN_RING3)
5924 PVM pVM = pVCpu->CTX_SUFF(pVM);
5925 char szRegs[4096];
5926 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5927 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5928 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5929 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5930 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5931 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5932 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5933 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5934 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5935 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5936 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5937 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5938 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5939 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5940 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5941 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5942 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5943 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5944 " efer=%016VR{efer}\n"
5945 " pat=%016VR{pat}\n"
5946 " sf_mask=%016VR{sf_mask}\n"
5947 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5948 " lstar=%016VR{lstar}\n"
5949 " star=%016VR{star} cstar=%016VR{cstar}\n"
5950 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5951 );
5952
5953 char szInstr[256];
5954 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5955 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5956 szInstr, sizeof(szInstr), NULL);
5957
5958 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5959#else
5960     RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
5961#endif
5962}
5963
5964/**
5965 * Complains about a stub.
5966 *
5967 * Providing two versions of this macro, one for daily use and one for use when
5968 * working on IEM.
5969 */
5970#if 0
5971# define IEMOP_BITCH_ABOUT_STUB() \
5972 do { \
5973 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5974 iemOpStubMsg2(pVCpu); \
5975 RTAssertPanic(); \
5976 } while (0)
5977#else
5978# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5979#endif
5980
5981/** Stubs an opcode. */
5982#define FNIEMOP_STUB(a_Name) \
5983 FNIEMOP_DEF(a_Name) \
5984 { \
5985 RT_NOREF_PV(pVCpu); \
5986 IEMOP_BITCH_ABOUT_STUB(); \
5987 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5988 } \
5989 typedef int ignore_semicolon
5990
5991/** Stubs an opcode. */
5992#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5993 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5994 { \
5995 RT_NOREF_PV(pVCpu); \
5996 RT_NOREF_PV(a_Name0); \
5997 IEMOP_BITCH_ABOUT_STUB(); \
5998 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5999 } \
6000 typedef int ignore_semicolon
6001
6002/** Stubs an opcode which currently should raise \#UD. */
6003#define FNIEMOP_UD_STUB(a_Name) \
6004 FNIEMOP_DEF(a_Name) \
6005 { \
6006 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6007 return IEMOP_RAISE_INVALID_OPCODE(); \
6008 } \
6009 typedef int ignore_semicolon
6010
6011/** Stubs an opcode which currently should raise \#UD. */
6012#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6013 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6014 { \
6015 RT_NOREF_PV(pVCpu); \
6016 RT_NOREF_PV(a_Name0); \
6017 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6018 return IEMOP_RAISE_INVALID_OPCODE(); \
6019 } \
6020 typedef int ignore_semicolon
6021
6022
6023
6024/** @name Register Access.
6025 * @{
6026 */
6027
6028/**
6029 * Gets a reference (pointer) to the specified hidden segment register.
6030 *
6031 * @returns Hidden register reference.
6032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6033 * @param iSegReg The segment register.
6034 */
6035IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6036{
6037 Assert(iSegReg < X86_SREG_COUNT);
6038 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6039 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6040
6041#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6042 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6043 { /* likely */ }
6044 else
6045 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6046#else
6047 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6048#endif
6049 return pSReg;
6050}
6051
6052
6053/**
6054 * Ensures that the given hidden segment register is up to date.
6055 *
6056 * @returns Hidden register reference.
6057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6058 * @param pSReg The segment register.
6059 */
6060IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6061{
6062#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6063 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6064 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6065#else
6066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6067 NOREF(pVCpu);
6068#endif
6069 return pSReg;
6070}
6071
6072
6073/**
6074 * Gets a reference (pointer) to the specified segment register (the selector
6075 * value).
6076 *
6077 * @returns Pointer to the selector variable.
6078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6079 * @param iSegReg The segment register.
6080 */
6081DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6082{
6083 Assert(iSegReg < X86_SREG_COUNT);
6084 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6085 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6086}
6087
6088
6089/**
6090 * Fetches the selector value of a segment register.
6091 *
6092 * @returns The selector value.
6093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6094 * @param iSegReg The segment register.
6095 */
6096DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6097{
6098 Assert(iSegReg < X86_SREG_COUNT);
6099 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6100 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6101}
6102
6103
6104/**
6105 * Fetches the base address value of a segment register.
6106 *
6107 * @returns The segment base address value.
6108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6109 * @param iSegReg The segment register.
6110 */
6111DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6112{
6113 Assert(iSegReg < X86_SREG_COUNT);
6114 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6115 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6116}
6117
6118
6119/**
6120 * Gets a reference (pointer) to the specified general purpose register.
6121 *
6122 * @returns Register reference.
6123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6124 * @param iReg The general purpose register.
6125 */
6126DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6127{
6128 Assert(iReg < 16);
6129 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6130}
6131
6132
6133/**
6134 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6135 *
6136 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6137 *
6138 * @returns Register reference.
6139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6140 * @param iReg The register.
6141 */
6142DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6143{
6144 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6145 {
6146 Assert(iReg < 16);
6147 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6148 }
6149 /* high 8-bit register. */
6150 Assert(iReg < 8);
6151 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6152}
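/*
 * Note on iemGRegRefU8: without a REX prefix, register encodings 4 thru 7
 * select the legacy high byte registers AH, CH, DH and BH (bits 8-15 of
 * rAX/rCX/rDX/rBX), which is why the second path masks the index with 3 and
 * returns the bHi member.  With any REX prefix present the same encodings
 * select SPL, BPL, SIL and DIL, and the first path covers all 16 registers.
 */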
6153
6154
6155/**
6156 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6157 *
6158 * @returns Register reference.
6159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6160 * @param iReg The register.
6161 */
6162DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6163{
6164 Assert(iReg < 16);
6165 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6166}
6167
6168
6169/**
6170 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6171 *
6172 * @returns Register reference.
6173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6174 * @param iReg The register.
6175 */
6176DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6177{
6178 Assert(iReg < 16);
6179 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6180}
6181
6182
6183/**
6184 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6185 *
6186 * @returns Register reference.
6187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6188 * @param iReg The register.
6189 */
6190DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6191{
6192 Assert(iReg < 16);
6193 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6194}
6195
6196
6197/**
6198 * Gets a reference (pointer) to the specified segment register's base address.
6199 *
6200 * @returns Segment register base address reference.
6201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6202 * @param iSegReg The segment selector.
6203 */
6204DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6205{
6206 Assert(iSegReg < X86_SREG_COUNT);
6207 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6208 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6209}
6210
6211
6212/**
6213 * Fetches the value of an 8-bit general purpose register.
6214 *
6215 * @returns The register value.
6216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6217 * @param iReg The register.
6218 */
6219DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6220{
6221 return *iemGRegRefU8(pVCpu, iReg);
6222}
6223
6224
6225/**
6226 * Fetches the value of a 16-bit general purpose register.
6227 *
6228 * @returns The register value.
6229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6230 * @param iReg The register.
6231 */
6232DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6233{
6234 Assert(iReg < 16);
6235 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6236}
6237
6238
6239/**
6240 * Fetches the value of a 32-bit general purpose register.
6241 *
6242 * @returns The register value.
6243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6244 * @param iReg The register.
6245 */
6246DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6247{
6248 Assert(iReg < 16);
6249 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6250}
6251
6252
6253/**
6254 * Fetches the value of a 64-bit general purpose register.
6255 *
6256 * @returns The register value.
6257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6258 * @param iReg The register.
6259 */
6260DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6261{
6262 Assert(iReg < 16);
6263 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6264}
6265
6266
6267/**
6268 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6269 *
6270 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6271 * segment limit.
6272 *
 * @returns Strict VBox status code.
6273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6274 * @param offNextInstr The offset of the next instruction.
6275 */
6276IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6277{
6278 switch (pVCpu->iem.s.enmEffOpSize)
6279 {
6280 case IEMMODE_16BIT:
6281 {
6282 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6283 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6284 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6285 return iemRaiseGeneralProtectionFault0(pVCpu);
6286 pVCpu->cpum.GstCtx.rip = uNewIp;
6287 break;
6288 }
6289
6290 case IEMMODE_32BIT:
6291 {
6292 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6293 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6294
6295 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6296 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6297 return iemRaiseGeneralProtectionFault0(pVCpu);
6298 pVCpu->cpum.GstCtx.rip = uNewEip;
6299 break;
6300 }
6301
6302 case IEMMODE_64BIT:
6303 {
6304 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6305
6306 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6307 if (!IEM_IS_CANONICAL(uNewRip))
6308 return iemRaiseGeneralProtectionFault0(pVCpu);
6309 pVCpu->cpum.GstCtx.rip = uNewRip;
6310 break;
6311 }
6312
6313 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6314 }
6315
6316 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6317
6318#ifndef IEM_WITH_CODE_TLB
6319 /* Flush the prefetch buffer. */
6320 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6321#endif
6322
6323 return VINF_SUCCESS;
6324}
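/*
 * Usage sketch (illustrative only - the instruction decoders normally reach
 * this function through the IEM_MC_REL_JMP_S8 microcode statement rather
 * than calling it directly):
 *
 *      int8_t i8Imm = ...;  - signed 8-bit displacement fetched from the opcode stream
 *      return iemRegRipRelativeJumpS8(pVCpu, i8Imm);
 */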
6325
6326
6327/**
6328 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6329 *
6330 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6331 * segment limit.
6332 *
6333 * @returns Strict VBox status code.
6334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6335 * @param offNextInstr The offset of the next instruction.
6336 */
6337IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6338{
6339 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6340
6341 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6342 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6343 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6344 return iemRaiseGeneralProtectionFault0(pVCpu);
6345 /** @todo Test 16-bit jump in 64-bit mode. Possible? */
6346 pVCpu->cpum.GstCtx.rip = uNewIp;
6347 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6348
6349#ifndef IEM_WITH_CODE_TLB
6350 /* Flush the prefetch buffer. */
6351 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6352#endif
6353
6354 return VINF_SUCCESS;
6355}
6356
6357
6358/**
6359 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6360 *
6361 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6362 * segment limit.
6363 *
6364 * @returns Strict VBox status code.
6365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6366 * @param offNextInstr The offset of the next instruction.
6367 */
6368IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6369{
6370 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6371
6372 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6373 {
6374 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6375
6376 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6377 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6378 return iemRaiseGeneralProtectionFault0(pVCpu);
6379 pVCpu->cpum.GstCtx.rip = uNewEip;
6380 }
6381 else
6382 {
6383 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6384
6385 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6386 if (!IEM_IS_CANONICAL(uNewRip))
6387 return iemRaiseGeneralProtectionFault0(pVCpu);
6388 pVCpu->cpum.GstCtx.rip = uNewRip;
6389 }
6390 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6391
6392#ifndef IEM_WITH_CODE_TLB
6393 /* Flush the prefetch buffer. */
6394 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6395#endif
6396
6397 return VINF_SUCCESS;
6398}
6399
6400
6401/**
6402 * Performs a near jump to the specified address.
6403 *
6404 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6405 * segment limit.
6406 *
 * @returns Strict VBox status code.
6407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6408 * @param uNewRip The new RIP value.
6409 */
6410IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6411{
6412 switch (pVCpu->iem.s.enmEffOpSize)
6413 {
6414 case IEMMODE_16BIT:
6415 {
6416 Assert(uNewRip <= UINT16_MAX);
6417 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6418 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6419 return iemRaiseGeneralProtectionFault0(pVCpu);
6420 /** @todo Test 16-bit jump in 64-bit mode. */
6421 pVCpu->cpum.GstCtx.rip = uNewRip;
6422 break;
6423 }
6424
6425 case IEMMODE_32BIT:
6426 {
6427 Assert(uNewRip <= UINT32_MAX);
6428 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6429 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6430
6431 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6432 return iemRaiseGeneralProtectionFault0(pVCpu);
6433 pVCpu->cpum.GstCtx.rip = uNewRip;
6434 break;
6435 }
6436
6437 case IEMMODE_64BIT:
6438 {
6439 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6440
6441 if (!IEM_IS_CANONICAL(uNewRip))
6442 return iemRaiseGeneralProtectionFault0(pVCpu);
6443 pVCpu->cpum.GstCtx.rip = uNewRip;
6444 break;
6445 }
6446
6447 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6448 }
6449
6450 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6451
6452#ifndef IEM_WITH_CODE_TLB
6453 /* Flush the prefetch buffer. */
6454 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6455#endif
6456
6457 return VINF_SUCCESS;
6458}
6459
6460
6461/**
6462 * Gets the address of the top of the stack.
6463 *
 * @returns The current stack pointer (SP, ESP or RSP depending on the mode).
6464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6465 */
6466DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6467{
6468 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6469 return pVCpu->cpum.GstCtx.rsp;
6470 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6471 return pVCpu->cpum.GstCtx.esp;
6472 return pVCpu->cpum.GstCtx.sp;
6473}
6474
6475
6476/**
6477 * Updates the RIP/EIP/IP to point to the next instruction.
6478 *
6479 * This function leaves the EFLAGS.RF flag alone.
6480 *
6481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6482 * @param cbInstr The number of bytes to add.
6483 */
6484IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6485{
6486 switch (pVCpu->iem.s.enmCpuMode)
6487 {
6488 case IEMMODE_16BIT:
6489 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6490 pVCpu->cpum.GstCtx.eip += cbInstr;
6491 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6492 break;
6493
6494 case IEMMODE_32BIT:
6495 pVCpu->cpum.GstCtx.eip += cbInstr;
6496 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6497 break;
6498
6499 case IEMMODE_64BIT:
6500 pVCpu->cpum.GstCtx.rip += cbInstr;
6501 break;
6502 default: AssertFailed();
6503 }
6504}
6505
6506
6507#if 0
6508/**
6509 * Updates the RIP/EIP/IP to point to the next instruction.
6510 *
6511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6512 */
6513IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6514{
6515 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6516}
6517#endif
6518
6519
6520
6521/**
6522 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6523 *
6524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6525 * @param cbInstr The number of bytes to add.
6526 */
6527IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6528{
6529 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6530
6531 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6532#if ARCH_BITS >= 64
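    /* Mask table indexed by IEMMODE: 16-bit and 32-bit modes truncate the
       result to the low 32 bits (EIP), 64-bit mode keeps the full RIP. */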
6533 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6534 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6535 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6536#else
6537 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6538 pVCpu->cpum.GstCtx.rip += cbInstr;
6539 else
6540 pVCpu->cpum.GstCtx.eip += cbInstr;
6541#endif
6542}
6543
6544
6545/**
6546 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6547 *
6548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6549 */
6550IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6551{
6552 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6553}
6554
6555
6556/**
6557 * Adds to the stack pointer.
6558 *
6559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6560 * @param cbToAdd The number of bytes to add (8-bit!).
6561 */
6562DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6563{
6564 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6565 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6566 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6567 pVCpu->cpum.GstCtx.esp += cbToAdd;
6568 else
6569 pVCpu->cpum.GstCtx.sp += cbToAdd;
6570}
6571
6572
6573/**
6574 * Subtracts from the stack pointer.
6575 *
6576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6577 * @param cbToSub The number of bytes to subtract (8-bit!).
6578 */
6579DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6580{
6581 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6582 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6583 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6584 pVCpu->cpum.GstCtx.esp -= cbToSub;
6585 else
6586 pVCpu->cpum.GstCtx.sp -= cbToSub;
6587}
6588
6589
6590/**
6591 * Adds to the temporary stack pointer.
6592 *
6593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6594 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6595 * @param cbToAdd The number of bytes to add (16-bit).
6596 */
6597DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6598{
6599 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6600 pTmpRsp->u += cbToAdd;
6601 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6602 pTmpRsp->DWords.dw0 += cbToAdd;
6603 else
6604 pTmpRsp->Words.w0 += cbToAdd;
6605}
6606
6607
6608/**
6609 * Subtracts from the temporary stack pointer.
6610 *
6611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6612 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6613 * @param cbToSub The number of bytes to subtract.
6614 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6615 * expecting that.
6616 */
6617DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6618{
6619 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6620 pTmpRsp->u -= cbToSub;
6621 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6622 pTmpRsp->DWords.dw0 -= cbToSub;
6623 else
6624 pTmpRsp->Words.w0 -= cbToSub;
6625}
6626
6627
6628/**
6629 * Calculates the effective stack address for a push of the specified size as
6630 * well as the new RSP value (upper bits may be masked).
6631 *
6632 * @returns Effective stack address for the push.
6633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6634 * @param cbItem The size of the stack item being pushed.
6635 * @param puNewRsp Where to return the new RSP value.
6636 */
6637DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6638{
6639 RTUINT64U uTmpRsp;
6640 RTGCPTR GCPtrTop;
6641 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6642
6643 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6644 GCPtrTop = uTmpRsp.u -= cbItem;
6645 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6646 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6647 else
6648 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6649 *puNewRsp = uTmpRsp.u;
6650 return GCPtrTop;
6651}
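/*
 * Illustrative usage sketch (simplified; u16Value is just a stand-in for the
 * data being pushed - see the iemMemStackPush* helpers in the memory access
 * section for the real implementations).  Note that RSP is only committed
 * once the store has succeeded:
 *
 *      uint64_t     uNewRsp;
 *      RTGCPTR      GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(uint16_t), &uNewRsp);
 *      VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, X86_SREG_SS, GCPtrTop, u16Value);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;
 *      return rcStrict;
 */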
6652
6653
6654/**
6655 * Gets the current stack pointer and calculates the value after a pop of the
6656 * specified size.
6657 *
6658 * @returns Current stack pointer.
6659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6660 * @param cbItem The size of the stack item to pop.
6661 * @param puNewRsp Where to return the new RSP value.
6662 */
6663DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6664{
6665 RTUINT64U uTmpRsp;
6666 RTGCPTR GCPtrTop;
6667 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6668
6669 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6670 {
6671 GCPtrTop = uTmpRsp.u;
6672 uTmpRsp.u += cbItem;
6673 }
6674 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6675 {
6676 GCPtrTop = uTmpRsp.DWords.dw0;
6677 uTmpRsp.DWords.dw0 += cbItem;
6678 }
6679 else
6680 {
6681 GCPtrTop = uTmpRsp.Words.w0;
6682 uTmpRsp.Words.w0 += cbItem;
6683 }
6684 *puNewRsp = uTmpRsp.u;
6685 return GCPtrTop;
6686}
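/*
 * Illustrative usage sketch (simplified; pu16Value is a stand-in for the
 * caller's destination - see the iemMemStackPop* helpers in the memory access
 * section for the real implementations).  The value is read from the old top
 * of the stack and RSP is only committed once the read has succeeded:
 *
 *      uint64_t     uNewRsp;
 *      RTGCPTR      GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(uint16_t), &uNewRsp);
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU16(pVCpu, pu16Value, X86_SREG_SS, GCPtrTop);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;
 *      return rcStrict;
 */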
6687
6688
6689/**
6690 * Calculates the effective stack address for a push of the specified size as
6691 * well as the new temporary RSP value (upper bits may be masked).
6692 *
6693 * @returns Effective stack address for the push.
6694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6695 * @param pTmpRsp The temporary stack pointer. This is updated.
6696 * @param cbItem The size of the stack item being pushed.
6697 */
6698DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6699{
6700 RTGCPTR GCPtrTop;
6701
6702 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6703 GCPtrTop = pTmpRsp->u -= cbItem;
6704 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6705 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6706 else
6707 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6708 return GCPtrTop;
6709}
6710
6711
6712/**
6713 * Gets the effective stack address for a pop of the specified size and
6714 * calculates and updates the temporary RSP.
6715 *
6716 * @returns Current stack pointer.
6717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6718 * @param pTmpRsp The temporary stack pointer. This is updated.
6719 * @param cbItem The size of the stack item to pop.
6720 */
6721DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6722{
6723 RTGCPTR GCPtrTop;
6724 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6725 {
6726 GCPtrTop = pTmpRsp->u;
6727 pTmpRsp->u += cbItem;
6728 }
6729 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6730 {
6731 GCPtrTop = pTmpRsp->DWords.dw0;
6732 pTmpRsp->DWords.dw0 += cbItem;
6733 }
6734 else
6735 {
6736 GCPtrTop = pTmpRsp->Words.w0;
6737 pTmpRsp->Words.w0 += cbItem;
6738 }
6739 return GCPtrTop;
6740}
6741
6742/** @} */
6743
6744
6745/** @name FPU access and helpers.
6746 *
6747 * @{
6748 */
6749
6750
6751/**
6752 * Hook for preparing to use the host FPU.
6753 *
6754 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6755 *
6756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6757 */
6758DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6759{
6760#ifdef IN_RING3
6761 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6762#else
6763 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6764#endif
6765 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6766}
6767
6768
6769/**
6770 * Hook for preparing to use the host FPU for SSE.
6771 *
6772 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6773 *
6774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6775 */
6776DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6777{
6778 iemFpuPrepareUsage(pVCpu);
6779}
6780
6781
6782/**
6783 * Hook for preparing to use the host FPU for AVX.
6784 *
6785 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6786 *
6787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6788 */
6789DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6790{
6791 iemFpuPrepareUsage(pVCpu);
6792}
6793
6794
6795/**
6796 * Hook for actualizing the guest FPU state before the interpreter reads it.
6797 *
6798 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6799 *
6800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6801 */
6802DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6803{
6804#ifdef IN_RING3
6805 NOREF(pVCpu);
6806#else
6807 CPUMRZFpuStateActualizeForRead(pVCpu);
6808#endif
6809 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6810}
6811
6812
6813/**
6814 * Hook for actualizing the guest FPU state before the interpreter changes it.
6815 *
6816 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6817 *
6818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6819 */
6820DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6821{
6822#ifdef IN_RING3
6823 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6824#else
6825 CPUMRZFpuStateActualizeForChange(pVCpu);
6826#endif
6827 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6828}
6829
6830
6831/**
6832 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6833 * only.
6834 *
6835 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6836 *
6837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6838 */
6839DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6840{
6841#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6842 NOREF(pVCpu);
6843#else
6844 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6845#endif
6846 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6847}
6848
6849
6850/**
6851 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6852 * read+write.
6853 *
6854 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6855 *
6856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6857 */
6858DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6859{
6860#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6861 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6862#else
6863 CPUMRZFpuStateActualizeForChange(pVCpu);
6864#endif
6865 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6866}
6867
6868
6869/**
6870 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6871 * only.
6872 *
6873 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6874 *
6875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6876 */
6877DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6878{
6879#ifdef IN_RING3
6880 NOREF(pVCpu);
6881#else
6882 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6883#endif
6884 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6885}
6886
6887
6888/**
6889 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6890 * read+write.
6891 *
6892 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6893 *
6894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6895 */
6896DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6897{
6898#ifdef IN_RING3
6899 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6900#else
6901 CPUMRZFpuStateActualizeForChange(pVCpu);
6902#endif
6903 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6904}
6905
6906
6907/**
6908 * Stores the QNaN value (real indefinite) into an FPU register.
6909 *
6910 * @param pReg Pointer to the register.
6911 */
6912DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6913{
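    /* Sign and exponent all ones, mantissa 0xc000000000000000 - the "real indefinite" QNaN. */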
6914 pReg->au32[0] = UINT32_C(0x00000000);
6915 pReg->au32[1] = UINT32_C(0xc0000000);
6916 pReg->au16[4] = UINT16_C(0xffff);
6917}
6918
6919
6920/**
6921 * Updates the FOP, FPU.CS and FPUIP registers.
6922 *
6923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6924 * @param pFpuCtx The FPU context.
6925 */
6926DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
6927{
6928 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6929 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6930 /** @todo x87.CS and FPUIP need to be kept separately. */
6931 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6932 {
6933 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
6934 * handled in real mode, based on the fnsave and fnstenv images. */
6935 pFpuCtx->CS = 0;
6936 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
6937 }
6938 else
6939 {
6940 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
6941 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
6942 }
6943}
6944
6945
6946/**
6947 * Updates the x87.DS and FPUDP registers.
6948 *
6949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6950 * @param pFpuCtx The FPU context.
6951 * @param iEffSeg The effective segment register.
6952 * @param GCPtrEff The effective address relative to @a iEffSeg.
6953 */
6954DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6955{
6956 RTSEL sel;
6957 switch (iEffSeg)
6958 {
6959 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
6960 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
6961 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
6962 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
6963 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
6964 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
6965 default:
6966 AssertMsgFailed(("%d\n", iEffSeg));
6967 sel = pVCpu->cpum.GstCtx.ds.Sel;
6968 }
6969 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6970 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6971 {
6972 pFpuCtx->DS = 0;
6973 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6974 }
6975 else
6976 {
6977 pFpuCtx->DS = sel;
6978 pFpuCtx->FPUDP = GCPtrEff;
6979 }
6980}
6981
6982
6983/**
6984 * Rotates the stack registers in the push direction.
6985 *
6986 * @param pFpuCtx The FPU context.
6987 * @remarks This is a complete waste of time, but fxsave stores the registers in
6988 * stack order.
6989 */
6990DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6991{
6992 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6993 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6994 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6995 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6996 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6997 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6998 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6999 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7000 pFpuCtx->aRegs[0].r80 = r80Tmp;
7001}
7002
7003
7004/**
7005 * Rotates the stack registers in the pop direction.
7006 *
7007 * @param pFpuCtx The FPU context.
7008 * @remarks This is a complete waste of time, but fxsave stores the registers in
7009 * stack order.
7010 */
7011DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7012{
7013 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7014 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7015 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7016 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7017 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7018 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7019 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7020 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7021 pFpuCtx->aRegs[7].r80 = r80Tmp;
7022}
7023
7024
7025/**
7026 * Updates FSW and pushes an FPU result onto the FPU stack if no pending
7027 * exception prevents it.
7028 *
7029 * @param pResult The FPU operation result to push.
7030 * @param pFpuCtx The FPU context.
7031 */
7032IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7033{
7034 /* Update FSW and bail if there are pending exceptions afterwards. */
7035 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7036 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7037 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7038 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7039 {
7040 pFpuCtx->FSW = fFsw;
7041 return;
7042 }
7043
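    /* The new TOP is TOP - 1; adding 7 keeps the modulo-8 arithmetic unsigned. */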
7044 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7045 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7046 {
7047 /* All is fine, push the actual value. */
7048 pFpuCtx->FTW |= RT_BIT(iNewTop);
7049 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7050 }
7051 else if (pFpuCtx->FCW & X86_FCW_IM)
7052 {
7053 /* Masked stack overflow, push QNaN. */
7054 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7055 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7056 }
7057 else
7058 {
7059 /* Raise stack overflow, don't push anything. */
7060 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7061 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7062 return;
7063 }
7064
7065 fFsw &= ~X86_FSW_TOP_MASK;
7066 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7067 pFpuCtx->FSW = fFsw;
7068
7069 iemFpuRotateStackPush(pFpuCtx);
7070}
7071
7072
7073/**
7074 * Stores a result in an FPU register and updates the FSW and FTW.
7075 *
7076 * @param pFpuCtx The FPU context.
7077 * @param pResult The result to store.
7078 * @param iStReg Which FPU register to store it in.
7079 */
7080IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7081{
7082 Assert(iStReg < 8);
7083 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7084 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7085 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7086 pFpuCtx->FTW |= RT_BIT(iReg);
7087 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7088}
7089
7090
7091/**
7092 * Only updates the FPU status word (FSW) with the result of the current
7093 * instruction.
7094 *
7095 * @param pFpuCtx The FPU context.
7096 * @param u16FSW The FSW output of the current instruction.
7097 */
7098IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7099{
7100 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7101 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7102}
7103
7104
7105/**
7106 * Pops one item off the FPU stack if no pending exception prevents it.
7107 *
7108 * @param pFpuCtx The FPU context.
7109 */
7110IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7111{
7112 /* Check pending exceptions. */
7113 uint16_t uFSW = pFpuCtx->FSW;
7114 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7115 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7116 return;
7117
7118 /* TOP++: popping increments TOP (adding 9 is the same as adding 1 modulo 8). */
7119 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7120 uFSW &= ~X86_FSW_TOP_MASK;
7121 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7122 pFpuCtx->FSW = uFSW;
7123
7124 /* Mark the previous ST0 as empty. */
7125 iOldTop >>= X86_FSW_TOP_SHIFT;
7126 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7127
7128 /* Rotate the registers. */
7129 iemFpuRotateStackPop(pFpuCtx);
7130}
7131
7132
7133/**
7134 * Pushes an FPU result onto the FPU stack if no pending exception prevents it.
7135 *
7136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7137 * @param pResult The FPU operation result to push.
7138 */
7139IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7140{
7141 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7142 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7143 iemFpuMaybePushResult(pResult, pFpuCtx);
7144}
7145
7146
7147/**
7148 * Pushes an FPU result onto the FPU stack if no pending exception prevents it,
7149 * and sets FPUDP and FPUDS.
7150 *
7151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7152 * @param pResult The FPU operation result to push.
7153 * @param iEffSeg The effective segment register.
7154 * @param GCPtrEff The effective address relative to @a iEffSeg.
7155 */
7156IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7157{
7158 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7159 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7160 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7161 iemFpuMaybePushResult(pResult, pFpuCtx);
7162}
7163
7164
7165/**
7166 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7167 * unless a pending exception prevents it.
7168 *
7169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7170 * @param pResult The FPU operation result to store and push.
7171 */
7172IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7173{
7174 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7175 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7176
7177 /* Update FSW and bail if there are pending exceptions afterwards. */
7178 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7179 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7180 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7181 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7182 {
7183 pFpuCtx->FSW = fFsw;
7184 return;
7185 }
7186
7187 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7188 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7189 {
7190 /* All is fine, push the actual value. */
7191 pFpuCtx->FTW |= RT_BIT(iNewTop);
7192 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7193 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7194 }
7195 else if (pFpuCtx->FCW & X86_FCW_IM)
7196 {
7197 /* Masked stack overflow, push QNaN. */
7198 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7199 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7200 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7201 }
7202 else
7203 {
7204 /* Raise stack overflow, don't push anything. */
7205 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7206 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7207 return;
7208 }
7209
7210 fFsw &= ~X86_FSW_TOP_MASK;
7211 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7212 pFpuCtx->FSW = fFsw;
7213
7214 iemFpuRotateStackPush(pFpuCtx);
7215}
7216
7217
7218/**
7219 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7220 * FOP.
7221 *
7222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7223 * @param pResult The result to store.
7224 * @param iStReg Which FPU register to store it in.
7225 */
7226IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7227{
7228 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7229 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7230 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7231}
7232
7233
7234/**
7235 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7236 * FOP, and then pops the stack.
7237 *
7238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7239 * @param pResult The result to store.
7240 * @param iStReg Which FPU register to store it in.
7241 */
7242IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7243{
7244 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7245 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7246 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7247 iemFpuMaybePopOne(pFpuCtx);
7248}
7249
7250
7251/**
7252 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7253 * FPUDP, and FPUDS.
7254 *
7255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7256 * @param pResult The result to store.
7257 * @param iStReg Which FPU register to store it in.
7258 * @param iEffSeg The effective memory operand selector register.
7259 * @param GCPtrEff The effective memory operand offset.
7260 */
7261IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7262 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7263{
7264 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7265 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7266 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7267 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7268}
7269
7270
7271/**
7272 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7273 * FPUDP, and FPUDS, and then pops the stack.
7274 *
7275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7276 * @param pResult The result to store.
7277 * @param iStReg Which FPU register to store it in.
7278 * @param iEffSeg The effective memory operand selector register.
7279 * @param GCPtrEff The effective memory operand offset.
7280 */
7281IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7282 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7283{
7284 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7285 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7286 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7287 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7288 iemFpuMaybePopOne(pFpuCtx);
7289}
7290
7291
7292/**
7293 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7294 *
7295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7296 */
7297IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7298{
7299 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7300 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7301}
7302
7303
7304/**
7305 * Marks the specified stack register as free (for FFREE).
7306 *
7307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7308 * @param iStReg The register to free.
7309 */
7310IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7311{
7312 Assert(iStReg < 8);
7313 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7314 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7315 pFpuCtx->FTW &= ~RT_BIT(iReg);
7316}
7317
7318
7319/**
7320 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7321 *
7322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7323 */
7324IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7325{
7326 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7327 uint16_t uFsw = pFpuCtx->FSW;
7328 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7329 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7330 uFsw &= ~X86_FSW_TOP_MASK;
7331 uFsw |= uTop;
7332 pFpuCtx->FSW = uFsw;
7333}
7334
7335
7336/**
7337 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7338 *
7339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7340 */
7341IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7342{
7343 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7344 uint16_t uFsw = pFpuCtx->FSW;
7345 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
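    /* Adding 7 is the same as subtracting 1 modulo 8 within the 3-bit TOP field. */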
7346 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7347 uFsw &= ~X86_FSW_TOP_MASK;
7348 uFsw |= uTop;
7349 pFpuCtx->FSW = uFsw;
7350}
7351
7352
7353/**
7354 * Updates the FSW, FOP, FPUIP, and FPUCS.
7355 *
7356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7357 * @param u16FSW The FSW from the current instruction.
7358 */
7359IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7360{
7361 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7362 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7363 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7364}
7365
7366
7367/**
7368 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7369 *
7370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7371 * @param u16FSW The FSW from the current instruction.
7372 */
7373IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7374{
7375 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7376 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7377 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7378 iemFpuMaybePopOne(pFpuCtx);
7379}
7380
7381
7382/**
7383 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7384 *
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 * @param u16FSW The FSW from the current instruction.
7387 * @param iEffSeg The effective memory operand selector register.
7388 * @param GCPtrEff The effective memory operand offset.
7389 */
7390IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7391{
7392 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7393 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7394 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7395 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7396}
7397
7398
7399/**
7400 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7401 *
7402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7403 * @param u16FSW The FSW from the current instruction.
7404 */
7405IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7406{
7407 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7408 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7409 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7410 iemFpuMaybePopOne(pFpuCtx);
7411 iemFpuMaybePopOne(pFpuCtx);
7412}
7413
7414
7415/**
7416 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7417 *
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 * @param u16FSW The FSW from the current instruction.
7420 * @param iEffSeg The effective memory operand selector register.
7421 * @param GCPtrEff The effective memory operand offset.
7422 */
7423IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7424{
7425 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7426 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7427 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7428 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7429 iemFpuMaybePopOne(pFpuCtx);
7430}
7431
7432
7433/**
7434 * Worker routine for raising an FPU stack underflow exception.
7435 *
7436 * @param pFpuCtx The FPU context.
7437 * @param iStReg The stack register being accessed.
7438 */
7439IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7440{
7441 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7442 if (pFpuCtx->FCW & X86_FCW_IM)
7443 {
7444 /* Masked underflow. */
7445 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7446 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7447 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7448 if (iStReg != UINT8_MAX)
7449 {
7450 pFpuCtx->FTW |= RT_BIT(iReg);
7451 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7452 }
7453 }
7454 else
7455 {
7456 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7457 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7458 }
7459}
7460
7461
7462/**
7463 * Raises an FPU stack underflow exception.
7464 *
7465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7466 * @param iStReg The destination register that should be loaded
7467 * with QNaN if \#IS is not masked. Specify
7468 * UINT8_MAX if none (like for fcom).
7469 */
7470DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7471{
7472 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7473 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7474 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7475}
7476
7477
7478DECL_NO_INLINE(IEM_STATIC, void)
7479iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7480{
7481 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7482 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7483 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7484 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7485}
7486
7487
7488DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7489{
7490 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7491 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7492 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7493 iemFpuMaybePopOne(pFpuCtx);
7494}
7495
7496
7497DECL_NO_INLINE(IEM_STATIC, void)
7498iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7499{
7500 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7501 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7502 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7503 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7504 iemFpuMaybePopOne(pFpuCtx);
7505}
7506
7507
7508DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7509{
7510 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7511 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7512 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7513 iemFpuMaybePopOne(pFpuCtx);
7514 iemFpuMaybePopOne(pFpuCtx);
7515}
7516
7517
7518DECL_NO_INLINE(IEM_STATIC, void)
7519iemFpuStackPushUnderflow(PVMCPU pVCpu)
7520{
7521 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7522 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7523
7524 if (pFpuCtx->FCW & X86_FCW_IM)
7525 {
7526 /* Masked underflow - push QNaN. */
7527 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7528 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7529 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7530 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7531 pFpuCtx->FTW |= RT_BIT(iNewTop);
7532 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7533 iemFpuRotateStackPush(pFpuCtx);
7534 }
7535 else
7536 {
7537 /* Exception pending - don't change TOP or the register stack. */
7538 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7539 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7540 }
7541}
7542
7543
7544DECL_NO_INLINE(IEM_STATIC, void)
7545iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7546{
7547 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7548 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7549
7550 if (pFpuCtx->FCW & X86_FCW_IM)
7551 {
7552 /* Masked underflow - push QNaN. */
7553 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7554 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7555 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7556 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7557 pFpuCtx->FTW |= RT_BIT(iNewTop);
7558 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7559 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7560 iemFpuRotateStackPush(pFpuCtx);
7561 }
7562 else
7563 {
7564 /* Exception pending - don't change TOP or the register stack. */
7565 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7566 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7567 }
7568}
7569
7570
7571/**
7572 * Worker routine for raising an FPU stack overflow exception on a push.
7573 *
7574 * @param pFpuCtx The FPU context.
7575 */
7576IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7577{
7578 if (pFpuCtx->FCW & X86_FCW_IM)
7579 {
7580 /* Masked overflow. */
7581 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7582 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7583 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7584 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7585 pFpuCtx->FTW |= RT_BIT(iNewTop);
7586 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7587 iemFpuRotateStackPush(pFpuCtx);
7588 }
7589 else
7590 {
7591 /* Exception pending - don't change TOP or the register stack. */
7592 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7593 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7594 }
7595}
7596
7597
7598/**
7599 * Raises an FPU stack overflow exception on a push.
7600 *
7601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7602 */
7603DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7604{
7605 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7606 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7607 iemFpuStackPushOverflowOnly(pFpuCtx);
7608}
7609
7610
7611/**
7612 * Raises an FPU stack overflow exception on a push with a memory operand.
7613 *
7614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7615 * @param iEffSeg The effective memory operand selector register.
7616 * @param GCPtrEff The effective memory operand offset.
7617 */
7618DECL_NO_INLINE(IEM_STATIC, void)
7619iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7620{
7621 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7622 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7623 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7624 iemFpuStackPushOverflowOnly(pFpuCtx);
7625}
7626
7627
7628IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7629{
7630 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7631 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7632 if (pFpuCtx->FTW & RT_BIT(iReg))
7633 return VINF_SUCCESS;
7634 return VERR_NOT_FOUND;
7635}
7636
7637
7638IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7639{
7640 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7641 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7642 if (pFpuCtx->FTW & RT_BIT(iReg))
7643 {
7644 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7645 return VINF_SUCCESS;
7646 }
7647 return VERR_NOT_FOUND;
7648}
7649
7650
7651IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7652 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7653{
7654 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7655 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7656 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7657 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7658 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7659 {
7660 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7661 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7662 return VINF_SUCCESS;
7663 }
7664 return VERR_NOT_FOUND;
7665}
7666
7667
7668IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7669{
7670 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7671 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7672 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7673 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7674 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7675 {
7676 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7677 return VINF_SUCCESS;
7678 }
7679 return VERR_NOT_FOUND;
7680}
7681
7682
7683/**
7684 * Updates the FPU exception status after FCW is changed.
7685 *
7686 * @param pFpuCtx The FPU context.
7687 */
7688IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7689{
7690 uint16_t u16Fsw = pFpuCtx->FSW;
7691 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7692 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7693 else
7694 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7695 pFpuCtx->FSW = u16Fsw;
7696}
7697
7698
7699/**
7700 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7701 *
7702 * @returns The full FTW.
7703 * @param pFpuCtx The FPU context.
7704 */
7705IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7706{
7707 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7708 uint16_t u16Ftw = 0;
7709 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7710 for (unsigned iSt = 0; iSt < 8; iSt++)
7711 {
7712 unsigned const iReg = (iSt + iTop) & 7;
7713 if (!(u8Ftw & RT_BIT(iReg)))
7714 u16Ftw |= 3 << (iReg * 2); /* empty */
7715 else
7716 {
7717 uint16_t uTag;
7718 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7719 if (pr80Reg->s.uExponent == 0x7fff)
7720 uTag = 2; /* Exponent is all 1's => Special. */
7721 else if (pr80Reg->s.uExponent == 0x0000)
7722 {
7723 if (pr80Reg->s.u64Mantissa == 0x0000)
7724 uTag = 1; /* All bits are zero => Zero. */
7725 else
7726 uTag = 2; /* Must be special. */
7727 }
7728 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7729 uTag = 0; /* Valid. */
7730 else
7731 uTag = 2; /* Must be special. */
7732
7733 u16Ftw |= uTag << (iReg * 2);
7734 }
7735 }
7736
7737 return u16Ftw;
7738}
7739
7740
7741/**
7742 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7743 *
7744 * @returns The compressed FTW.
7745 * @param u16FullFtw The full FTW to convert.
7746 */
7747IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7748{
7749 uint8_t u8Ftw = 0;
7750 for (unsigned i = 0; i < 8; i++)
7751 {
7752 if ((u16FullFtw & 3) != 3 /*empty*/)
7753 u8Ftw |= RT_BIT(i);
7754 u16FullFtw >>= 2;
7755 }
7756
7757 return u8Ftw;
7758}
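/*
 * Worked example: a full FTW of 0x3fff has tag 11b (empty) for registers 0
 * thru 6 and tag 00b (valid) for register 7, so iemFpuCompressFtw(0x3fff)
 * returns 0x80 - only bit 7 is set, marking register 7 as non-empty.
 */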
7759
7760/** @} */
7761
7762
7763/** @name Memory access.
7764 *
7765 * @{
7766 */
7767
7768
7769/**
7770 * Updates the IEMCPU::cbWritten counter if applicable.
7771 *
7772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7773 * @param fAccess The access being accounted for.
7774 * @param cbMem The access size.
7775 */
7776DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7777{
7778 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7779 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7780 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7781}
7782
7783
7784/**
7785 * Checks if the given segment can be written to, raising the appropriate
7786 * exception if not.
7787 *
7788 * @returns VBox strict status code.
7789 *
7790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7791 * @param pHid Pointer to the hidden register.
7792 * @param iSegReg The register number.
7793 * @param pu64BaseAddr Where to return the base address to use for the
7794 * segment. (In 64-bit code it may differ from the
7795 * base in the hidden segment.)
7796 */
7797IEM_STATIC VBOXSTRICTRC
7798iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7799{
7800 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7801
7802 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7803 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7804 else
7805 {
7806 if (!pHid->Attr.n.u1Present)
7807 {
7808 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7809 AssertRelease(uSel == 0);
7810 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7811 return iemRaiseGeneralProtectionFault0(pVCpu);
7812 }
7813
7814 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7815 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7816 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7817 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7818 *pu64BaseAddr = pHid->u64Base;
7819 }
7820 return VINF_SUCCESS;
7821}
7822
7823
7824/**
7825 * Checks if the given segment can be read from, raising the appropriate
7826 * exception if not.
7827 *
7828 * @returns VBox strict status code.
7829 *
7830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7831 * @param pHid Pointer to the hidden register.
7832 * @param iSegReg The register number.
7833 * @param pu64BaseAddr Where to return the base address to use for the
7834 * segment. (In 64-bit code it may differ from the
7835 * base in the hidden segment.)
7836 */
7837IEM_STATIC VBOXSTRICTRC
7838iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7839{
7840 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7841
7842 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7843 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7844 else
7845 {
7846 if (!pHid->Attr.n.u1Present)
7847 {
7848 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7849 AssertRelease(uSel == 0);
7850 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7851 return iemRaiseGeneralProtectionFault0(pVCpu);
7852 }
7853
7854 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7855 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7856 *pu64BaseAddr = pHid->u64Base;
7857 }
7858 return VINF_SUCCESS;
7859}
7860
7861
7862/**
7863 * Applies the segment limit, base and attributes.
7864 *
7865 * This may raise a \#GP or \#SS.
7866 *
7867 * @returns VBox strict status code.
7868 *
7869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7870 * @param fAccess The kind of access which is being performed.
7871 * @param iSegReg The index of the segment register to apply.
7872 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7873 * TSS, ++).
7874 * @param cbMem The access size.
7875 * @param pGCPtrMem Pointer to the guest memory address to apply
7876 * segmentation to. Input and output parameter.
7877 */
7878IEM_STATIC VBOXSTRICTRC
7879iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7880{
7881 if (iSegReg == UINT8_MAX)
7882 return VINF_SUCCESS;
7883
7884 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7885 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7886 switch (pVCpu->iem.s.enmCpuMode)
7887 {
7888 case IEMMODE_16BIT:
7889 case IEMMODE_32BIT:
7890 {
7891 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7892 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7893
7894 if ( pSel->Attr.n.u1Present
7895 && !pSel->Attr.n.u1Unusable)
7896 {
7897 Assert(pSel->Attr.n.u1DescType);
7898 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7899 {
7900 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7901 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7902 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7903
7904 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7905 {
7906 /** @todo CPL check. */
7907 }
7908
7909 /*
7910 * There are two kinds of data selectors, normal and expand down.
7911 */
7912 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7913 {
7914 if ( GCPtrFirst32 > pSel->u32Limit
7915 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7916 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7917 }
7918 else
7919 {
7920 /*
7921 * The upper boundary is defined by the B bit, not the G bit!
7922 */
7923 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7924 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7925 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7926 }
7927 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7928 }
7929 else
7930 {
7931
7932 /*
7933 * A code selector can usually be used to read through; writing is
7934 * only permitted in real and V8086 mode.
7935 */
7936 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7937 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7938 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7939 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7940 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7941
7942 if ( GCPtrFirst32 > pSel->u32Limit
7943 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7944 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7945
7946 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7947 {
7948 /** @todo CPL check. */
7949 }
7950
7951 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7952 }
7953 }
7954 else
7955 return iemRaiseGeneralProtectionFault0(pVCpu);
7956 return VINF_SUCCESS;
7957 }
7958
7959 case IEMMODE_64BIT:
7960 {
7961 RTGCPTR GCPtrMem = *pGCPtrMem;
7962 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7963 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7964
7965 Assert(cbMem >= 1);
7966 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7967 return VINF_SUCCESS;
7968 return iemRaiseGeneralProtectionFault0(pVCpu);
7969 }
7970
7971 default:
7972 AssertFailedReturn(VERR_IEM_IPE_7);
7973 }
7974}
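
/*
 * Illustrative numbers for the expand-down branch above: valid offsets lie
 * strictly above the limit, up to 4GiB-1 (B=1) or 64KiB-1 (B=0). Not part of
 * the original file and not built (#if 0); the values and helper name are
 * made up for illustration.
 */
#if 0
static void iemMemExpandDownSegExample(void)
{
    /* Hypothetical expand-down data segment: limit=0x0fff, B=1, base=0. */
    uint32_t const uLimit       = UINT32_C(0x00000fff);
    uint32_t const uUpperBound  = UINT32_MAX;                /* B=0 would give UINT32_C(0xffff) */
    uint32_t const GCPtrFirst32 = UINT32_C(0x00002000);      /* 4 byte access at 0x2000 */
    uint32_t const GCPtrLast32  = GCPtrFirst32 + 4 - 1;
    /* Allowed: the first byte is above the limit and the last byte is within the upper bound. */
    Assert(!(GCPtrFirst32 < uLimit + UINT32_C(1) || GCPtrLast32 > uUpperBound));
    /* An access at 0x0800 would fail the first clause and be rejected via iemRaiseSelectorBounds. */
}
#endif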
7975
7976
7977/**
7978 * Translates a virtual address to a physical address and checks if we
7979 * can access the page as specified.
7980 *
7981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7982 * @param GCPtrMem The virtual address.
7983 * @param fAccess The intended access.
7984 * @param pGCPhysMem Where to return the physical address.
7985 */
7986IEM_STATIC VBOXSTRICTRC
7987iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7988{
7989 /** @todo Need a different PGM interface here. We're currently using
7990 * generic / REM interfaces. This won't cut it for R0 & RC. */
7991 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
7992 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
7993 RTGCPHYS GCPhys;
7994 uint64_t fFlags;
7995 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7996 if (RT_FAILURE(rc))
7997 {
7998 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7999 /** @todo Check unassigned memory in unpaged mode. */
8000 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8001 *pGCPhysMem = NIL_RTGCPHYS;
8002 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8003 }
8004
8005 /* If the page is writable, user accessible and does not have the no-exec bit
8006 set, all access is allowed. Otherwise we'll have to check more carefully... */
8007 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8008 {
8009 /* Write to read only memory? */
8010 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8011 && !(fFlags & X86_PTE_RW)
8012 && ( (pVCpu->iem.s.uCpl == 3
8013 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8014 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8015 {
8016 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8017 *pGCPhysMem = NIL_RTGCPHYS;
8018 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8019 }
8020
8021 /* Kernel memory accessed by userland? */
8022 if ( !(fFlags & X86_PTE_US)
8023 && pVCpu->iem.s.uCpl == 3
8024 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8025 {
8026 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8027 *pGCPhysMem = NIL_RTGCPHYS;
8028 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8029 }
8030
8031 /* Executing non-executable memory? */
8032 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8033 && (fFlags & X86_PTE_PAE_NX)
8034 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8035 {
8036 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8037 *pGCPhysMem = NIL_RTGCPHYS;
8038 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8039 VERR_ACCESS_DENIED);
8040 }
8041 }
8042
8043 /*
8044 * Set the dirty / access flags.
8045 * ASSUMES this is set when the address is translated rather than on commit...
8046 */
8047 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8048 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8049 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8050 {
8051 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8052 AssertRC(rc2);
8053 }
8054
8055 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8056 *pGCPhysMem = GCPhys;
8057 return VINF_SUCCESS;
8058}
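
/*
 * Illustrative sketch of the read-only write decision above: a CPL 3 data/stack
 * write to a read-only page always faults, while a supervisor write only faults
 * when CR0.WP is set. Not part of the original file and not built (#if 0); the
 * helper name is made up and simply mirrors the condition used above.
 */
#if 0
static bool iemMemExampleRoWriteFaults(uint64_t fPteFlags, uint8_t uCpl, uint32_t fAccess, uint64_t uCr0)
{
    return    (fAccess & IEM_ACCESS_TYPE_WRITE)
           && !(fPteFlags & X86_PTE_RW)
           && (   (uCpl == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
               || (uCr0 & X86_CR0_WP));
}
/* iemMemExampleRoWriteFaults(0, 0, IEM_ACCESS_DATA_W, X86_CR0_WP) -> true  (ring-0 write, WP=1)
   iemMemExampleRoWriteFaults(0, 0, IEM_ACCESS_DATA_W, 0)          -> false (ring-0 write, WP=0) */
#endif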
8059
8060
8061
8062/**
8063 * Maps a physical page.
8064 *
8065 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8067 * @param GCPhysMem The physical address.
8068 * @param fAccess The intended access.
8069 * @param ppvMem Where to return the mapping address.
8070 * @param pLock The PGM lock.
8071 */
8072IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8073{
8074#ifdef IEM_LOG_MEMORY_WRITES
8075 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8076 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8077#endif
8078
8079 /** @todo This API may require some improving later. A private deal with PGM
8080 * regarding locking and unlocking needs to be struck. A couple of TLBs
8081 * living in PGM, but with publicly accessible inlined access methods
8082 * could perhaps be an even better solution. */
8083 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8084 GCPhysMem,
8085 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8086 pVCpu->iem.s.fBypassHandlers,
8087 ppvMem,
8088 pLock);
8089 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8090 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8091
8092 return rc;
8093}
8094
8095
8096/**
8097 * Unmap a page previously mapped by iemMemPageMap.
8098 *
8099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8100 * @param GCPhysMem The physical address.
8101 * @param fAccess The intended access.
8102 * @param pvMem What iemMemPageMap returned.
8103 * @param pLock The PGM lock.
8104 */
8105DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8106{
8107 NOREF(pVCpu);
8108 NOREF(GCPhysMem);
8109 NOREF(fAccess);
8110 NOREF(pvMem);
8111 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8112}
8113
8114
8115/**
8116 * Looks up a memory mapping entry.
8117 *
8118 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8120 * @param pvMem The memory address.
8121 * @param fAccess The kind of access to match.
8122 */
8123DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8124{
8125 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8126 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8127 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8128 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8129 return 0;
8130 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8131 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8132 return 1;
8133 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8134 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8135 return 2;
8136 return VERR_NOT_FOUND;
8137}
8138
8139
8140/**
8141 * Finds a free memmap entry when using iNextMapping doesn't work.
8142 *
8143 * @returns Memory mapping index, 1024 on failure.
8144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8145 */
8146IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8147{
8148 /*
8149 * The easy case.
8150 */
8151 if (pVCpu->iem.s.cActiveMappings == 0)
8152 {
8153 pVCpu->iem.s.iNextMapping = 1;
8154 return 0;
8155 }
8156
8157 /* There should be enough mappings for all instructions. */
8158 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8159
8160 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8161 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8162 return i;
8163
8164 AssertFailedReturn(1024);
8165}
8166
8167
8168/**
8169 * Commits a bounce buffer that needs writing back and unmaps it.
8170 *
8171 * @returns Strict VBox status code.
8172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8173 * @param iMemMap The index of the buffer to commit.
8174 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8175 * Always false in ring-3, obviously.
8176 */
8177IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8178{
8179 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8180 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8181#ifdef IN_RING3
8182 Assert(!fPostponeFail);
8183 RT_NOREF_PV(fPostponeFail);
8184#endif
8185
8186 /*
8187 * Do the writing.
8188 */
8189 PVM pVM = pVCpu->CTX_SUFF(pVM);
8190 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8191 {
8192 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8193 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8194 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8195 if (!pVCpu->iem.s.fBypassHandlers)
8196 {
8197 /*
8198 * Carefully and efficiently dealing with access handler return
8199 * codes makes this a little bloated.
8200 */
8201 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8202 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8203 pbBuf,
8204 cbFirst,
8205 PGMACCESSORIGIN_IEM);
8206 if (rcStrict == VINF_SUCCESS)
8207 {
8208 if (cbSecond)
8209 {
8210 rcStrict = PGMPhysWrite(pVM,
8211 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8212 pbBuf + cbFirst,
8213 cbSecond,
8214 PGMACCESSORIGIN_IEM);
8215 if (rcStrict == VINF_SUCCESS)
8216 { /* nothing */ }
8217 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8218 {
8219 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8220 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8221 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8222 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8223 }
8224#ifndef IN_RING3
8225 else if (fPostponeFail)
8226 {
8227 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8228 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8229 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8230 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8231 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8232 return iemSetPassUpStatus(pVCpu, rcStrict);
8233 }
8234#endif
8235 else
8236 {
8237 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8238 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8239 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8240 return rcStrict;
8241 }
8242 }
8243 }
8244 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8245 {
8246 if (!cbSecond)
8247 {
8248 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8249 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8250 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8251 }
8252 else
8253 {
8254 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8255 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8256 pbBuf + cbFirst,
8257 cbSecond,
8258 PGMACCESSORIGIN_IEM);
8259 if (rcStrict2 == VINF_SUCCESS)
8260 {
8261 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8262 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8263 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8264 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8265 }
8266 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8267 {
8268 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8269 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8270 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8271 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8272 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8273 }
8274#ifndef IN_RING3
8275 else if (fPostponeFail)
8276 {
8277 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8278 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8279 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8280 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8281 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8282 return iemSetPassUpStatus(pVCpu, rcStrict);
8283 }
8284#endif
8285 else
8286 {
8287 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8288 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8289 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8290 return rcStrict2;
8291 }
8292 }
8293 }
8294#ifndef IN_RING3
8295 else if (fPostponeFail)
8296 {
8297 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8298 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8299 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8300 if (!cbSecond)
8301 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8302 else
8303 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8304 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8305 return iemSetPassUpStatus(pVCpu, rcStrict);
8306 }
8307#endif
8308 else
8309 {
8310 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8311 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8312 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8313 return rcStrict;
8314 }
8315 }
8316 else
8317 {
8318 /*
8319 * No access handlers, much simpler.
8320 */
8321 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8322 if (RT_SUCCESS(rc))
8323 {
8324 if (cbSecond)
8325 {
8326 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8327 if (RT_SUCCESS(rc))
8328 { /* likely */ }
8329 else
8330 {
8331 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8332 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8333 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8334 return rc;
8335 }
8336 }
8337 }
8338 else
8339 {
8340 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8341 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8342 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8343 return rc;
8344 }
8345 }
8346 }
8347
8348#if defined(IEM_LOG_MEMORY_WRITES)
8349 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8350 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8351 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8352 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8353 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8354 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8355
8356 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8357 g_cbIemWrote = cbWrote;
8358 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8359#endif
8360
8361 /*
8362 * Free the mapping entry.
8363 */
8364 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8365 Assert(pVCpu->iem.s.cActiveMappings != 0);
8366 pVCpu->iem.s.cActiveMappings--;
8367 return VINF_SUCCESS;
8368}
8369
8370
8371/**
8372 * iemMemMap worker that deals with a request crossing pages.
8373 */
8374IEM_STATIC VBOXSTRICTRC
8375iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8376{
8377 /*
8378 * Do the address translations.
8379 */
8380 RTGCPHYS GCPhysFirst;
8381 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8382 if (rcStrict != VINF_SUCCESS)
8383 return rcStrict;
8384
8385 RTGCPHYS GCPhysSecond;
8386 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8387 fAccess, &GCPhysSecond);
8388 if (rcStrict != VINF_SUCCESS)
8389 return rcStrict;
8390 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8391
8392 PVM pVM = pVCpu->CTX_SUFF(pVM);
8393
8394 /*
8395 * Read in the current memory content if it's a read, execute or partial
8396 * write access.
8397 */
8398 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8399 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8400 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8401
8402 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8403 {
8404 if (!pVCpu->iem.s.fBypassHandlers)
8405 {
8406 /*
8407 * Must carefully deal with access handler status codes here,
8408 * which makes the code a bit bloated.
8409 */
8410 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8411 if (rcStrict == VINF_SUCCESS)
8412 {
8413 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8414 if (rcStrict == VINF_SUCCESS)
8415 { /*likely */ }
8416 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8417 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8418 else
8419 {
8420 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8421 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8422 return rcStrict;
8423 }
8424 }
8425 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8426 {
8427 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8428 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8429 {
8430 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8431 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8432 }
8433 else
8434 {
8435 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8436 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
8437 return rcStrict2;
8438 }
8439 }
8440 else
8441 {
8442 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8443 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8444 return rcStrict;
8445 }
8446 }
8447 else
8448 {
8449 /*
8450 * No informational status codes here, much more straightforward.
8451 */
8452 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8453 if (RT_SUCCESS(rc))
8454 {
8455 Assert(rc == VINF_SUCCESS);
8456 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8457 if (RT_SUCCESS(rc))
8458 Assert(rc == VINF_SUCCESS);
8459 else
8460 {
8461 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8462 return rc;
8463 }
8464 }
8465 else
8466 {
8467 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8468 return rc;
8469 }
8470 }
8471 }
8472#ifdef VBOX_STRICT
8473 else
8474 memset(pbBuf, 0xcc, cbMem);
8475 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8476 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8477#endif
8478
8479 /*
8480 * Commit the bounce buffer entry.
8481 */
8482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8484 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8485 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8486 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8487 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8488 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8489 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8490 pVCpu->iem.s.cActiveMappings++;
8491
8492 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8493 *ppvMem = pbBuf;
8494 return VINF_SUCCESS;
8495}
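
/*
 * Illustrative numbers for the cross-page split arithmetic used above. Not part
 * of the original file and not built (#if 0); the helper name and values are
 * made up for illustration.
 */
#if 0
static void iemMemCrossPageSplitExample(void)
{
    /* Hypothetical 8 byte access at guest linear address 0x1ffc (4KiB pages). */
    RTGCPTR  const GCPtrFirst   = 0x1ffc;
    size_t   const cbMem        = 8;
    uint32_t const cbFirstPage  = PAGE_SIZE - (uint32_t)(GCPtrFirst & PAGE_OFFSET_MASK); /* 4 bytes on page one */
    uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);                       /* 4 bytes on page two */
    Assert(cbFirstPage == 4 && cbSecondPage == 4);
    /* The second translation above is done for (GCPtrFirst + cbMem - 1) & ~PAGE_OFFSET_MASK, i.e. 0x2000. */
}
#endif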
8496
8497
8498/**
8499 * iemMemMap worker that deals with iemMemPageMap failures.
8500 */
8501IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8502 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8503{
8504 /*
8505 * Filter out conditions we can handle and the ones which shouldn't happen.
8506 */
8507 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8508 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8509 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8510 {
8511 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8512 return rcMap;
8513 }
8514 pVCpu->iem.s.cPotentialExits++;
8515
8516 /*
8517 * Read in the current memory content if it's a read, execute or partial
8518 * write access.
8519 */
8520 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8521 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8522 {
8523 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8524 memset(pbBuf, 0xff, cbMem);
8525 else
8526 {
8527 int rc;
8528 if (!pVCpu->iem.s.fBypassHandlers)
8529 {
8530 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8531 if (rcStrict == VINF_SUCCESS)
8532 { /* nothing */ }
8533 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8534 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8535 else
8536 {
8537 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8538 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8539 return rcStrict;
8540 }
8541 }
8542 else
8543 {
8544 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8545 if (RT_SUCCESS(rc))
8546 { /* likely */ }
8547 else
8548 {
8549 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8550 GCPhysFirst, rc));
8551 return rc;
8552 }
8553 }
8554 }
8555 }
8556#ifdef VBOX_STRICT
8557 else
8558 memset(pbBuf, 0xcc, cbMem);
8561 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8562 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8563#endif
8564
8565 /*
8566 * Commit the bounce buffer entry.
8567 */
8568 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8569 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8570 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8571 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8572 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8573 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8574 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8575 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8576 pVCpu->iem.s.cActiveMappings++;
8577
8578 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8579 *ppvMem = pbBuf;
8580 return VINF_SUCCESS;
8581}
8582
8583
8584
8585/**
8586 * Maps the specified guest memory for the given kind of access.
8587 *
8588 * This may be using bounce buffering of the memory if it's crossing a page
8589 * boundary or if there is an access handler installed for any of it. Because
8590 * of lock prefix guarantees, we're in for some extra clutter when this
8591 * happens.
8592 *
8593 * This may raise a \#GP, \#SS, \#PF or \#AC.
8594 *
8595 * @returns VBox strict status code.
8596 *
8597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8598 * @param ppvMem Where to return the pointer to the mapped
8599 * memory.
8600 * @param cbMem The number of bytes to map. This is usually 1,
8601 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8602 * string operations it can be up to a page.
8603 * @param iSegReg The index of the segment register to use for
8604 * this access. The base and limits are checked.
8605 * Use UINT8_MAX to indicate that no segmentation
8606 * is required (for IDT, GDT and LDT accesses).
8607 * @param GCPtrMem The address of the guest memory.
8608 * @param fAccess How the memory is being accessed. The
8609 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8610 * how to map the memory, while the
8611 * IEM_ACCESS_WHAT_XXX bit is used when raising
8612 * exceptions.
8613 */
8614IEM_STATIC VBOXSTRICTRC
8615iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8616{
8617 /*
8618 * Check the input and figure out which mapping entry to use.
8619 */
8620 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8621 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8622 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8623
8624 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8625 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8626 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8627 {
8628 iMemMap = iemMemMapFindFree(pVCpu);
8629 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8630 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8631 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8632 pVCpu->iem.s.aMemMappings[2].fAccess),
8633 VERR_IEM_IPE_9);
8634 }
8635
8636 /*
8637 * Map the memory, checking that we can actually access it. If something
8638 * slightly complicated happens, fall back on bounce buffering.
8639 */
8640 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8641 if (rcStrict != VINF_SUCCESS)
8642 return rcStrict;
8643
8644 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8645 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8646
8647 RTGCPHYS GCPhysFirst;
8648 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8649 if (rcStrict != VINF_SUCCESS)
8650 return rcStrict;
8651
8652 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8653 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8654 if (fAccess & IEM_ACCESS_TYPE_READ)
8655 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8656
8657 void *pvMem;
8658 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8659 if (rcStrict != VINF_SUCCESS)
8660 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8661
8662 /*
8663 * Fill in the mapping table entry.
8664 */
8665 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8666 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8667 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8668 pVCpu->iem.s.cActiveMappings++;
8669
8670 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8671 *ppvMem = pvMem;
8672 return VINF_SUCCESS;
8673}
8674
8675
8676/**
8677 * Commits the guest memory if bounce buffered and unmaps it.
8678 *
8679 * @returns Strict VBox status code.
8680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8681 * @param pvMem The mapping.
8682 * @param fAccess The kind of access.
8683 */
8684IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8685{
8686 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8687 AssertReturn(iMemMap >= 0, iMemMap);
8688
8689 /* If it's bounce buffered, we may need to write back the buffer. */
8690 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8691 {
8692 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8693 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8694 }
8695 /* Otherwise unlock it. */
8696 else
8697 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8698
8699 /* Free the entry. */
8700 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8701 Assert(pVCpu->iem.s.cActiveMappings != 0);
8702 pVCpu->iem.s.cActiveMappings--;
8703 return VINF_SUCCESS;
8704}
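
/*
 * Illustrative sketch of the typical map / modify / commit-and-unmap pattern
 * built on iemMemMap and iemMemCommitAndUnmap above, as used by the data fetch
 * helpers further down. Not part of the original file and not built (#if 0);
 * the helper name and the increment operation are made up.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemMemExampleIncU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint32_t    *pu32;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32, sizeof(*pu32), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32 += 1; /* read-modify-write on the mapped (or bounce buffered) guest memory */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif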
8705
8706#ifdef IEM_WITH_SETJMP
8707
8708/**
8709 * Maps the specified guest memory for the given kind of access, longjmp on
8710 * error.
8711 *
8712 * This may be using bounce buffering of the memory if it's crossing a page
8713 * boundary or if there is an access handler installed for any of it. Because
8714 * of lock prefix guarantees, we're in for some extra clutter when this
8715 * happens.
8716 *
8717 * This may raise a \#GP, \#SS, \#PF or \#AC.
8718 *
8719 * @returns Pointer to the mapped memory.
8720 *
8721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8722 * @param cbMem The number of bytes to map. This is usually 1,
8723 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8724 * string operations it can be up to a page.
8725 * @param iSegReg The index of the segment register to use for
8726 * this access. The base and limits are checked.
8727 * Use UINT8_MAX to indicate that no segmentation
8728 * is required (for IDT, GDT and LDT accesses).
8729 * @param GCPtrMem The address of the guest memory.
8730 * @param fAccess How the memory is being accessed. The
8731 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8732 * how to map the memory, while the
8733 * IEM_ACCESS_WHAT_XXX bit is used when raising
8734 * exceptions.
8735 */
8736IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8737{
8738 /*
8739 * Check the input and figure out which mapping entry to use.
8740 */
8741 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8742 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8743 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8744
8745 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8746 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8747 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8748 {
8749 iMemMap = iemMemMapFindFree(pVCpu);
8750 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8751 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8752 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8753 pVCpu->iem.s.aMemMappings[2].fAccess),
8754 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8755 }
8756
8757 /*
8758 * Map the memory, checking that we can actually access it. If something
8759 * slightly complicated happens, fall back on bounce buffering.
8760 */
8761 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8762 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8763 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8764
8765 /* Crossing a page boundary? */
8766 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8767 { /* No (likely). */ }
8768 else
8769 {
8770 void *pvMem;
8771 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8772 if (rcStrict == VINF_SUCCESS)
8773 return pvMem;
8774 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8775 }
8776
8777 RTGCPHYS GCPhysFirst;
8778 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8779 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8780 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8781
8782 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8783 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8784 if (fAccess & IEM_ACCESS_TYPE_READ)
8785 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8786
8787 void *pvMem;
8788 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8789 if (rcStrict == VINF_SUCCESS)
8790 { /* likely */ }
8791 else
8792 {
8793 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8794 if (rcStrict == VINF_SUCCESS)
8795 return pvMem;
8796 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8797 }
8798
8799 /*
8800 * Fill in the mapping table entry.
8801 */
8802 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8803 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8804 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8805 pVCpu->iem.s.cActiveMappings++;
8806
8807 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8808 return pvMem;
8809}
8810
8811
8812/**
8813 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8814 *
8815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8816 * @param pvMem The mapping.
8817 * @param fAccess The kind of access.
8818 */
8819IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8820{
8821 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8822 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8823
8824 /* If it's bounce buffered, we may need to write back the buffer. */
8825 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8826 {
8827 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8828 {
8829 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8830 if (rcStrict == VINF_SUCCESS)
8831 return;
8832 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8833 }
8834 }
8835 /* Otherwise unlock it. */
8836 else
8837 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8838
8839 /* Free the entry. */
8840 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8841 Assert(pVCpu->iem.s.cActiveMappings != 0);
8842 pVCpu->iem.s.cActiveMappings--;
8843}
8844
8845#endif /* IEM_WITH_SETJMP */
8846
8847#ifndef IN_RING3
8848/**
8849 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8850 * buffer part runs into trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
8851 *
8852 * Allows the instruction to be completed and retired, while the IEM user will
8853 * return to ring-3 immediately afterwards and do the postponed writes there.
8854 *
8855 * @returns VBox status code (no strict statuses). Caller must check
8856 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8858 * @param pvMem The mapping.
8859 * @param fAccess The kind of access.
8860 */
8861IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8862{
8863 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8864 AssertReturn(iMemMap >= 0, iMemMap);
8865
8866 /* If it's bounce buffered, we may need to write back the buffer. */
8867 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8868 {
8869 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8870 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8871 }
8872 /* Otherwise unlock it. */
8873 else
8874 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8875
8876 /* Free the entry. */
8877 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8878 Assert(pVCpu->iem.s.cActiveMappings != 0);
8879 pVCpu->iem.s.cActiveMappings--;
8880 return VINF_SUCCESS;
8881}
8882#endif
8883
8884
8885/**
8886 * Rolls back mappings, releasing page locks and such.
8887 *
8888 * The caller shall only call this after checking cActiveMappings.
8889 *
8891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8892 */
8893IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8894{
8895 Assert(pVCpu->iem.s.cActiveMappings > 0);
8896
8897 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8898 while (iMemMap-- > 0)
8899 {
8900 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8901 if (fAccess != IEM_ACCESS_INVALID)
8902 {
8903 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8904 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8905 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8906 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8907 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
8908 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
8909 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
8910 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
8911 pVCpu->iem.s.cActiveMappings--;
8912 }
8913 }
8914}
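
/*
 * Illustrative sketch of how callers are expected to use iemMemRollback: only
 * on a failure path and only after checking cActiveMappings, as the note above
 * requires. Not part of the original file and not built (#if 0); the helper
 * name is made up.
 */
#if 0
static void iemMemExampleRollbackOnFailure(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu); /* releases page locks and invalidates the mapping entries */
}
#endif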
8915
8916
8917/**
8918 * Fetches a data byte.
8919 *
8920 * @returns Strict VBox status code.
8921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8922 * @param pu8Dst Where to return the byte.
8923 * @param iSegReg The index of the segment register to use for
8924 * this access. The base and limits are checked.
8925 * @param GCPtrMem The address of the guest memory.
8926 */
8927IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8928{
8929 /* The lazy approach for now... */
8930 uint8_t const *pu8Src;
8931 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8932 if (rc == VINF_SUCCESS)
8933 {
8934 *pu8Dst = *pu8Src;
8935 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8936 }
8937 return rc;
8938}
8939
8940
8941#ifdef IEM_WITH_SETJMP
8942/**
8943 * Fetches a data byte, longjmp on error.
8944 *
8945 * @returns The byte.
8946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8947 * @param iSegReg The index of the segment register to use for
8948 * this access. The base and limits are checked.
8949 * @param GCPtrMem The address of the guest memory.
8950 */
8951DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8952{
8953 /* The lazy approach for now... */
8954 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8955 uint8_t const bRet = *pu8Src;
8956 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8957 return bRet;
8958}
8959#endif /* IEM_WITH_SETJMP */
8960
8961
8962/**
8963 * Fetches a data word.
8964 *
8965 * @returns Strict VBox status code.
8966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8967 * @param pu16Dst Where to return the word.
8968 * @param iSegReg The index of the segment register to use for
8969 * this access. The base and limits are checked.
8970 * @param GCPtrMem The address of the guest memory.
8971 */
8972IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8973{
8974 /* The lazy approach for now... */
8975 uint16_t const *pu16Src;
8976 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8977 if (rc == VINF_SUCCESS)
8978 {
8979 *pu16Dst = *pu16Src;
8980 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8981 }
8982 return rc;
8983}
8984
8985
8986#ifdef IEM_WITH_SETJMP
8987/**
8988 * Fetches a data word, longjmp on error.
8989 *
8990 * @returns The word
8991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8992 * @param iSegReg The index of the segment register to use for
8993 * this access. The base and limits are checked.
8994 * @param GCPtrMem The address of the guest memory.
8995 */
8996DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8997{
8998 /* The lazy approach for now... */
8999 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9000 uint16_t const u16Ret = *pu16Src;
9001 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9002 return u16Ret;
9003}
9004#endif
9005
9006
9007/**
9008 * Fetches a data dword.
9009 *
9010 * @returns Strict VBox status code.
9011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9012 * @param pu32Dst Where to return the dword.
9013 * @param iSegReg The index of the segment register to use for
9014 * this access. The base and limits are checked.
9015 * @param GCPtrMem The address of the guest memory.
9016 */
9017IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9018{
9019 /* The lazy approach for now... */
9020 uint32_t const *pu32Src;
9021 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9022 if (rc == VINF_SUCCESS)
9023 {
9024 *pu32Dst = *pu32Src;
9025 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9026 }
9027 return rc;
9028}
9029
9030
9031#ifdef IEM_WITH_SETJMP
9032
9033IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9034{
9035 Assert(cbMem >= 1);
9036 Assert(iSegReg < X86_SREG_COUNT);
9037
9038 /*
9039 * 64-bit mode is simpler.
9040 */
9041 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9042 {
9043 if (iSegReg >= X86_SREG_FS)
9044 {
9045 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9046 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9047 GCPtrMem += pSel->u64Base;
9048 }
9049
9050 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9051 return GCPtrMem;
9052 }
9053 /*
9054 * 16-bit and 32-bit segmentation.
9055 */
9056 else
9057 {
9058 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9059 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9060 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9061 == X86DESCATTR_P /* data, expand up */
9062 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9063 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9064 {
9065 /* expand up */
9066 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9067 if (RT_LIKELY( GCPtrLast32 - 1 <= pSel->u32Limit
9068 && GCPtrLast32 > (uint32_t)GCPtrMem))
9069 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9070 }
9071 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9072 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9073 {
9074 /* expand down */
9075 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9076 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9077 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9078 && GCPtrLast32 > (uint32_t)GCPtrMem))
9079 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9080 }
9081 else
9082 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9083 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9084 }
9085 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9086}
9087
9088
9089IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9090{
9091 Assert(cbMem >= 1);
9092 Assert(iSegReg < X86_SREG_COUNT);
9093
9094 /*
9095 * 64-bit mode is simpler.
9096 */
9097 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9098 {
9099 if (iSegReg >= X86_SREG_FS)
9100 {
9101 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9102 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9103 GCPtrMem += pSel->u64Base;
9104 }
9105
9106 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9107 return GCPtrMem;
9108 }
9109 /*
9110 * 16-bit and 32-bit segmentation.
9111 */
9112 else
9113 {
9114 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9115 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9116 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9117 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9118 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9119 {
9120 /* expand up */
9121 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9122 if (RT_LIKELY( GCPtrLast32 - 1 <= pSel->u32Limit
9123 && GCPtrLast32 > (uint32_t)GCPtrMem))
9124 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9125 }
9126 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9127 {
9128 /* expand down */
9129 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9130 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9131 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9132 && GCPtrLast32 > (uint32_t)GCPtrMem))
9133 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9134 }
9135 else
9136 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9137 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9138 }
9139 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9140}
9141
9142
9143/**
9144 * Fetches a data dword, longjmp on error, fallback/safe version.
9145 *
9146 * @returns The dword
9147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9148 * @param iSegReg The index of the segment register to use for
9149 * this access. The base and limits are checked.
9150 * @param GCPtrMem The address of the guest memory.
9151 */
9152IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9153{
9154 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9155 uint32_t const u32Ret = *pu32Src;
9156 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9157 return u32Ret;
9158}
9159
9160
9161/**
9162 * Fetches a data dword, longjmp on error.
9163 *
9164 * @returns The dword
9165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9166 * @param iSegReg The index of the segment register to use for
9167 * this access. The base and limits are checked.
9168 * @param GCPtrMem The address of the guest memory.
9169 */
9170DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9171{
9172# ifdef IEM_WITH_DATA_TLB
9173 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9174 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9175 {
9176 /// @todo more later.
9177 }
9178
9179 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9180# else
9181 /* The lazy approach. */
9182 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9183 uint32_t const u32Ret = *pu32Src;
9184 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9185 return u32Ret;
9186# endif
9187}
9188#endif
9189
9190
9191#ifdef SOME_UNUSED_FUNCTION
9192/**
9193 * Fetches a data dword and sign extends it to a qword.
9194 *
9195 * @returns Strict VBox status code.
9196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9197 * @param pu64Dst Where to return the sign extended value.
9198 * @param iSegReg The index of the segment register to use for
9199 * this access. The base and limits are checked.
9200 * @param GCPtrMem The address of the guest memory.
9201 */
9202IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9203{
9204 /* The lazy approach for now... */
9205 int32_t const *pi32Src;
9206 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9207 if (rc == VINF_SUCCESS)
9208 {
9209 *pu64Dst = *pi32Src;
9210 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9211 }
9212#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9213 else
9214 *pu64Dst = 0;
9215#endif
9216 return rc;
9217}
9218#endif
9219
9220
9221/**
9222 * Fetches a data qword.
9223 *
9224 * @returns Strict VBox status code.
9225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9226 * @param pu64Dst Where to return the qword.
9227 * @param iSegReg The index of the segment register to use for
9228 * this access. The base and limits are checked.
9229 * @param GCPtrMem The address of the guest memory.
9230 */
9231IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9232{
9233 /* The lazy approach for now... */
9234 uint64_t const *pu64Src;
9235 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9236 if (rc == VINF_SUCCESS)
9237 {
9238 *pu64Dst = *pu64Src;
9239 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9240 }
9241 return rc;
9242}
9243
9244
9245#ifdef IEM_WITH_SETJMP
9246/**
9247 * Fetches a data qword, longjmp on error.
9248 *
9249 * @returns The qword.
9250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9251 * @param iSegReg The index of the segment register to use for
9252 * this access. The base and limits are checked.
9253 * @param GCPtrMem The address of the guest memory.
9254 */
9255DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9256{
9257 /* The lazy approach for now... */
9258 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9259 uint64_t const u64Ret = *pu64Src;
9260 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9261 return u64Ret;
9262}
9263#endif
9264
9265
9266/**
9267 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9268 *
9269 * @returns Strict VBox status code.
9270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9271 * @param pu64Dst Where to return the qword.
9272 * @param iSegReg The index of the segment register to use for
9273 * this access. The base and limits are checked.
9274 * @param GCPtrMem The address of the guest memory.
9275 */
9276IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9277{
9278 /* The lazy approach for now... */
9279 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9280 if (RT_UNLIKELY(GCPtrMem & 15))
9281 return iemRaiseGeneralProtectionFault0(pVCpu);
9282
9283 uint64_t const *pu64Src;
9284 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9285 if (rc == VINF_SUCCESS)
9286 {
9287 *pu64Dst = *pu64Src;
9288 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9289 }
9290 return rc;
9291}
9292
9293
9294#ifdef IEM_WITH_SETJMP
9295/**
9296 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9297 *
9298 * @returns The qword.
9299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9300 * @param iSegReg The index of the segment register to use for
9301 * this access. The base and limits are checked.
9302 * @param GCPtrMem The address of the guest memory.
9303 */
9304DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9305{
9306 /* The lazy approach for now... */
9307 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9308 if (RT_LIKELY(!(GCPtrMem & 15)))
9309 {
9310 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9311 uint64_t const u64Ret = *pu64Src;
9312 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9313 return u64Ret;
9314 }
9315
9316 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9317 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9318}
9319#endif
9320
9321
9322/**
9323 * Fetches a data tword.
9324 *
9325 * @returns Strict VBox status code.
9326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9327 * @param pr80Dst Where to return the tword.
9328 * @param iSegReg The index of the segment register to use for
9329 * this access. The base and limits are checked.
9330 * @param GCPtrMem The address of the guest memory.
9331 */
9332IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9333{
9334 /* The lazy approach for now... */
9335 PCRTFLOAT80U pr80Src;
9336 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9337 if (rc == VINF_SUCCESS)
9338 {
9339 *pr80Dst = *pr80Src;
9340 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9341 }
9342 return rc;
9343}
9344
9345
9346#ifdef IEM_WITH_SETJMP
9347/**
9348 * Fetches a data tword, longjmp on error.
9349 *
9350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9351 * @param pr80Dst Where to return the tword.
9352 * @param iSegReg The index of the segment register to use for
9353 * this access. The base and limits are checked.
9354 * @param GCPtrMem The address of the guest memory.
9355 */
9356DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9357{
9358 /* The lazy approach for now... */
9359 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9360 *pr80Dst = *pr80Src;
9361 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9362}
9363#endif
9364
9365
9366/**
9367 * Fetches a data dqword (double qword), generally SSE related.
9368 *
9369 * @returns Strict VBox status code.
9370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9371 * @param pu128Dst Where to return the dqword.
9372 * @param iSegReg The index of the segment register to use for
9373 * this access. The base and limits are checked.
9374 * @param GCPtrMem The address of the guest memory.
9375 */
9376IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9377{
9378 /* The lazy approach for now... */
9379 PCRTUINT128U pu128Src;
9380 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9381 if (rc == VINF_SUCCESS)
9382 {
9383 pu128Dst->au64[0] = pu128Src->au64[0];
9384 pu128Dst->au64[1] = pu128Src->au64[1];
9385 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9386 }
9387 return rc;
9388}
9389
9390
9391#ifdef IEM_WITH_SETJMP
9392/**
9393 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9394 *
9395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9396 * @param pu128Dst Where to return the dqword.
9397 * @param iSegReg The index of the segment register to use for
9398 * this access. The base and limits are checked.
9399 * @param GCPtrMem The address of the guest memory.
9400 */
9401IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9402{
9403 /* The lazy approach for now... */
9404 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9405 pu128Dst->au64[0] = pu128Src->au64[0];
9406 pu128Dst->au64[1] = pu128Src->au64[1];
9407 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9408}
9409#endif
9410
9411
9412/**
9413 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9414 * related.
9415 *
9416 * Raises \#GP(0) if not aligned.
9417 *
9418 * @returns Strict VBox status code.
9419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9420 * @param pu128Dst Where to return the dqword.
9421 * @param iSegReg The index of the segment register to use for
9422 * this access. The base and limits are checked.
9423 * @param GCPtrMem The address of the guest memory.
9424 */
9425IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9426{
9427 /* The lazy approach for now... */
9428 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9429 if ( (GCPtrMem & 15)
9430 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9431 return iemRaiseGeneralProtectionFault0(pVCpu);
9432
9433 PCRTUINT128U pu128Src;
9434 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9435 if (rc == VINF_SUCCESS)
9436 {
9437 pu128Dst->au64[0] = pu128Src->au64[0];
9438 pu128Dst->au64[1] = pu128Src->au64[1];
9439 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9440 }
9441 return rc;
9442}
9443
9444
9445#ifdef IEM_WITH_SETJMP
9446/**
9447 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9448 * related, longjmp on error.
9449 *
9450 * Raises \#GP(0) if not aligned.
9451 *
9452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9453 * @param pu128Dst Where to return the dqword.
9454 * @param iSegReg The index of the segment register to use for
9455 * this access. The base and limits are checked.
9456 * @param GCPtrMem The address of the guest memory.
9457 */
9458DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9459{
9460 /* The lazy approach for now... */
9461 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9462 if ( (GCPtrMem & 15) == 0
9463 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9464 {
9465 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9466 pu128Dst->au64[0] = pu128Src->au64[0];
9467 pu128Dst->au64[1] = pu128Src->au64[1];
9468 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9469 return;
9470 }
9471
9472 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9473 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9474}
9475#endif
9476
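/*
 * Editor's note: the block below is an illustrative sketch only; it is not part of
 * the original source and is excluded from the build.  It isolates the #GP(0)
 * alignment predicate used by the SSE aligned fetch/store helpers above: a 16 byte
 * misaligned access faults unless the (AMD) MXCSR.MM misaligned-exception-mask bit
 * is set.  The MM bit value below is an assumption made for the sketch; the real
 * code uses X86_MXCSR_MM.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>
# define EXAMPLE_MXCSR_MM UINT32_C(0x00020000)  /* assumed bit position of MXCSR.MM */
static bool iemExampleSseAlignedAccessFaults(uint64_t GCPtrMem, uint32_t fMxCsr)
{
    return (GCPtrMem & 15) != 0                 /* not on a 16 byte boundary... */
        && !(fMxCsr & EXAMPLE_MXCSR_MM);        /* ...and misalignment is not tolerated. */
}
#endif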
9477
9478/**
9479 * Fetches a data oword (octo word), generally AVX related.
9480 *
9481 * @returns Strict VBox status code.
9482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9483 * @param pu256Dst Where to return the oword.
9484 * @param iSegReg The index of the segment register to use for
9485 * this access. The base and limits are checked.
9486 * @param GCPtrMem The address of the guest memory.
9487 */
9488IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9489{
9490 /* The lazy approach for now... */
9491 PCRTUINT256U pu256Src;
9492 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9493 if (rc == VINF_SUCCESS)
9494 {
9495 pu256Dst->au64[0] = pu256Src->au64[0];
9496 pu256Dst->au64[1] = pu256Src->au64[1];
9497 pu256Dst->au64[2] = pu256Src->au64[2];
9498 pu256Dst->au64[3] = pu256Src->au64[3];
9499 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9500 }
9501 return rc;
9502}
9503
9504
9505#ifdef IEM_WITH_SETJMP
9506/**
9507 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9508 *
9509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9510 * @param pu256Dst Where to return the oword.
9511 * @param iSegReg The index of the segment register to use for
9512 * this access. The base and limits are checked.
9513 * @param GCPtrMem The address of the guest memory.
9514 */
9515IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9516{
9517 /* The lazy approach for now... */
9518 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9519 pu256Dst->au64[0] = pu256Src->au64[0];
9520 pu256Dst->au64[1] = pu256Src->au64[1];
9521 pu256Dst->au64[2] = pu256Src->au64[2];
9522 pu256Dst->au64[3] = pu256Src->au64[3];
9523 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9524}
9525#endif
9526
9527
9528/**
9529 * Fetches a data oword (octo word) at an aligned address, generally AVX
9530 * related.
9531 *
9532 * Raises \#GP(0) if not aligned.
9533 *
9534 * @returns Strict VBox status code.
9535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9536 * @param pu256Dst Where to return the oword.
9537 * @param iSegReg The index of the segment register to use for
9538 * this access. The base and limits are checked.
9539 * @param GCPtrMem The address of the guest memory.
9540 */
9541IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9542{
9543 /* The lazy approach for now... */
9544 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9545 if (GCPtrMem & 31)
9546 return iemRaiseGeneralProtectionFault0(pVCpu);
9547
9548 PCRTUINT256U pu256Src;
9549 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9550 if (rc == VINF_SUCCESS)
9551 {
9552 pu256Dst->au64[0] = pu256Src->au64[0];
9553 pu256Dst->au64[1] = pu256Src->au64[1];
9554 pu256Dst->au64[2] = pu256Src->au64[2];
9555 pu256Dst->au64[3] = pu256Src->au64[3];
9556 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9557 }
9558 return rc;
9559}
9560
9561
9562#ifdef IEM_WITH_SETJMP
9563/**
9564 * Fetches a data oword (octo word) at an aligned address, generally AVX
9565 * related, longjmp on error.
9566 *
9567 * Raises \#GP(0) if not aligned.
9568 *
9569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9570 * @param pu256Dst Where to return the oword.
9571 * @param iSegReg The index of the segment register to use for
9572 * this access. The base and limits are checked.
9573 * @param GCPtrMem The address of the guest memory.
9574 */
9575DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9576{
9577 /* The lazy approach for now... */
9578 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9579 if ((GCPtrMem & 31) == 0)
9580 {
9581 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9582 pu256Dst->au64[0] = pu256Src->au64[0];
9583 pu256Dst->au64[1] = pu256Src->au64[1];
9584 pu256Dst->au64[2] = pu256Src->au64[2];
9585 pu256Dst->au64[3] = pu256Src->au64[3];
9586 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9587 return;
9588 }
9589
9590 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9591 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9592}
9593#endif
9594
9595
9596
9597/**
9598 * Fetches a descriptor register (lgdt, lidt).
9599 *
9600 * @returns Strict VBox status code.
9601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9602 * @param pcbLimit Where to return the limit.
9603 * @param pGCPtrBase Where to return the base.
9604 * @param iSegReg The index of the segment register to use for
9605 * this access. The base and limits are checked.
9606 * @param GCPtrMem The address of the guest memory.
9607 * @param enmOpSize The effective operand size.
9608 */
9609IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9610 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9611{
9612 /*
9613 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9614 * little special:
9615 * - The two reads are done separately.
9616 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9617 * - We suspect the 386 to actually commit the limit before the base in
9618 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9619 * don't try to emulate this eccentric behavior, because it's not well
9620 * enough understood and rather hard to trigger.
9621 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9622 */
9623 VBOXSTRICTRC rcStrict;
9624 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9625 {
9626 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9627 if (rcStrict == VINF_SUCCESS)
9628 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9629 }
9630 else
9631 {
9632 uint32_t uTmp = 0; /* (zero init silences a Visual C++ 'maybe used uninitialized' warning) */
9633 if (enmOpSize == IEMMODE_32BIT)
9634 {
9635 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9636 {
9637 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9638 if (rcStrict == VINF_SUCCESS)
9639 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9640 }
9641 else
9642 {
9643 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9644 if (rcStrict == VINF_SUCCESS)
9645 {
9646 *pcbLimit = (uint16_t)uTmp;
9647 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9648 }
9649 }
9650 if (rcStrict == VINF_SUCCESS)
9651 *pGCPtrBase = uTmp;
9652 }
9653 else
9654 {
9655 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9656 if (rcStrict == VINF_SUCCESS)
9657 {
9658 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9659 if (rcStrict == VINF_SUCCESS)
9660 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9661 }
9662 }
9663 }
9664 return rcStrict;
9665}
9666
9667
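/*
 * Editor's note: the block below is an illustrative sketch only; it is not part of
 * the original source and is excluded from the build.  It shows the pseudo
 * descriptor layout decoded by iemMemFetchDataXdtr above: a 16-bit limit followed
 * by a base that is 24, 32 or 64 bits wide depending on operand size and CPU mode.
 * A little endian host is assumed, and the helper name is made up for the example.
 */
#if 0
# include <stdint.h>
# include <string.h>
static void iemExampleDecodeXdtr(const uint8_t *pbMem, unsigned cbBase /* 3, 4 or 8 */,
                                 uint16_t *pcbLimit, uint64_t *puBase)
{
    memcpy(pcbLimit, pbMem, sizeof(uint16_t));  /* bytes 0..1: the limit */
    uint64_t uBase = 0;
    memcpy(&uBase, pbMem + 2, cbBase);          /* bytes 2..: the base; 3 bytes give the 24-bit form */
    *puBase = uBase;
}
#endif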
9668
9669/**
9670 * Stores a data byte.
9671 *
9672 * @returns Strict VBox status code.
9673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9674 * @param iSegReg The index of the segment register to use for
9675 * this access. The base and limits are checked.
9676 * @param GCPtrMem The address of the guest memory.
9677 * @param u8Value The value to store.
9678 */
9679IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9680{
9681 /* The lazy approach for now... */
9682 uint8_t *pu8Dst;
9683 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9684 if (rc == VINF_SUCCESS)
9685 {
9686 *pu8Dst = u8Value;
9687 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9688 }
9689 return rc;
9690}
9691
9692
9693#ifdef IEM_WITH_SETJMP
9694/**
9695 * Stores a data byte, longjmp on error.
9696 *
9697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9698 * @param iSegReg The index of the segment register to use for
9699 * this access. The base and limits are checked.
9700 * @param GCPtrMem The address of the guest memory.
9701 * @param u8Value The value to store.
9702 */
9703IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9704{
9705 /* The lazy approach for now... */
9706 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9707 *pu8Dst = u8Value;
9708 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9709}
9710#endif
9711
9712
9713/**
9714 * Stores a data word.
9715 *
9716 * @returns Strict VBox status code.
9717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9718 * @param iSegReg The index of the segment register to use for
9719 * this access. The base and limits are checked.
9720 * @param GCPtrMem The address of the guest memory.
9721 * @param u16Value The value to store.
9722 */
9723IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9724{
9725 /* The lazy approach for now... */
9726 uint16_t *pu16Dst;
9727 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9728 if (rc == VINF_SUCCESS)
9729 {
9730 *pu16Dst = u16Value;
9731 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9732 }
9733 return rc;
9734}
9735
9736
9737#ifdef IEM_WITH_SETJMP
9738/**
9739 * Stores a data word, longjmp on error.
9740 *
9741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9742 * @param iSegReg The index of the segment register to use for
9743 * this access. The base and limits are checked.
9744 * @param GCPtrMem The address of the guest memory.
9745 * @param u16Value The value to store.
9746 */
9747IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9748{
9749 /* The lazy approach for now... */
9750 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9751 *pu16Dst = u16Value;
9752 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9753}
9754#endif
9755
9756
9757/**
9758 * Stores a data dword.
9759 *
9760 * @returns Strict VBox status code.
9761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9762 * @param iSegReg The index of the segment register to use for
9763 * this access. The base and limits are checked.
9764 * @param GCPtrMem The address of the guest memory.
9765 * @param u32Value The value to store.
9766 */
9767IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9768{
9769 /* The lazy approach for now... */
9770 uint32_t *pu32Dst;
9771 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9772 if (rc == VINF_SUCCESS)
9773 {
9774 *pu32Dst = u32Value;
9775 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9776 }
9777 return rc;
9778}
9779
9780
9781#ifdef IEM_WITH_SETJMP
9782/**
9783 * Stores a data dword, longjmp on error.
9784 *
9785 * @returns Strict VBox status code.
9786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9787 * @param iSegReg The index of the segment register to use for
9788 * this access. The base and limits are checked.
9789 * @param GCPtrMem The address of the guest memory.
9790 * @param u32Value The value to store.
9791 */
9792IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9793{
9794 /* The lazy approach for now... */
9795 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9796 *pu32Dst = u32Value;
9797 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9798}
9799#endif
9800
9801
9802/**
9803 * Stores a data qword.
9804 *
9805 * @returns Strict VBox status code.
9806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9807 * @param iSegReg The index of the segment register to use for
9808 * this access. The base and limits are checked.
9809 * @param GCPtrMem The address of the guest memory.
9810 * @param u64Value The value to store.
9811 */
9812IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9813{
9814 /* The lazy approach for now... */
9815 uint64_t *pu64Dst;
9816 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9817 if (rc == VINF_SUCCESS)
9818 {
9819 *pu64Dst = u64Value;
9820 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9821 }
9822 return rc;
9823}
9824
9825
9826#ifdef IEM_WITH_SETJMP
9827/**
9828 * Stores a data qword, longjmp on error.
9829 *
9830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9831 * @param iSegReg The index of the segment register to use for
9832 * this access. The base and limits are checked.
9833 * @param GCPtrMem The address of the guest memory.
9834 * @param u64Value The value to store.
9835 */
9836IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9837{
9838 /* The lazy approach for now... */
9839 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9840 *pu64Dst = u64Value;
9841 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9842}
9843#endif
9844
9845
9846/**
9847 * Stores a data dqword.
9848 *
9849 * @returns Strict VBox status code.
9850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9851 * @param iSegReg The index of the segment register to use for
9852 * this access. The base and limits are checked.
9853 * @param GCPtrMem The address of the guest memory.
9854 * @param u128Value The value to store.
9855 */
9856IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9857{
9858 /* The lazy approach for now... */
9859 PRTUINT128U pu128Dst;
9860 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9861 if (rc == VINF_SUCCESS)
9862 {
9863 pu128Dst->au64[0] = u128Value.au64[0];
9864 pu128Dst->au64[1] = u128Value.au64[1];
9865 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9866 }
9867 return rc;
9868}
9869
9870
9871#ifdef IEM_WITH_SETJMP
9872/**
9873 * Stores a data dqword, longjmp on error.
9874 *
9875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9876 * @param iSegReg The index of the segment register to use for
9877 * this access. The base and limits are checked.
9878 * @param GCPtrMem The address of the guest memory.
9879 * @param u128Value The value to store.
9880 */
9881IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9882{
9883 /* The lazy approach for now... */
9884 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9885 pu128Dst->au64[0] = u128Value.au64[0];
9886 pu128Dst->au64[1] = u128Value.au64[1];
9887 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9888}
9889#endif
9890
9891
9892/**
9893 * Stores a data dqword, SSE aligned.
9894 *
9895 * @returns Strict VBox status code.
9896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9897 * @param iSegReg The index of the segment register to use for
9898 * this access. The base and limits are checked.
9899 * @param GCPtrMem The address of the guest memory.
9900 * @param u128Value The value to store.
9901 */
9902IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9903{
9904 /* The lazy approach for now... */
9905 if ( (GCPtrMem & 15)
9906 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9907 return iemRaiseGeneralProtectionFault0(pVCpu);
9908
9909 PRTUINT128U pu128Dst;
9910 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9911 if (rc == VINF_SUCCESS)
9912 {
9913 pu128Dst->au64[0] = u128Value.au64[0];
9914 pu128Dst->au64[1] = u128Value.au64[1];
9915 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9916 }
9917 return rc;
9918}
9919
9920
9921#ifdef IEM_WITH_SETJMP
9922/**
9923 * Stores a data dqword, SSE aligned, longjmp on error.
9924 *
9925 * @returns Strict VBox status code.
9926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9927 * @param iSegReg The index of the segment register to use for
9928 * this access. The base and limits are checked.
9929 * @param GCPtrMem The address of the guest memory.
9930 * @param u128Value The value to store.
9931 */
9932DECL_NO_INLINE(IEM_STATIC, void)
9933iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9934{
9935 /* The lazy approach for now... */
9936 if ( (GCPtrMem & 15) == 0
9937 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9938 {
9939 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9940 pu128Dst->au64[0] = u128Value.au64[0];
9941 pu128Dst->au64[1] = u128Value.au64[1];
9942 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9943 return;
9944 }
9945
9946 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9947 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9948}
9949#endif
9950
9951
9952/**
9953 * Stores a data oword (octo word), generally AVX related.
9954 *
9955 * @returns Strict VBox status code.
9956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9957 * @param iSegReg The index of the segment register to use for
9958 * this access. The base and limits are checked.
9959 * @param GCPtrMem The address of the guest memory.
9960 * @param pu256Value Pointer to the value to store.
9961 */
9962IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
9963{
9964 /* The lazy approach for now... */
9965 PRTUINT256U pu256Dst;
9966 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9967 if (rc == VINF_SUCCESS)
9968 {
9969 pu256Dst->au64[0] = pu256Value->au64[0];
9970 pu256Dst->au64[1] = pu256Value->au64[1];
9971 pu256Dst->au64[2] = pu256Value->au64[2];
9972 pu256Dst->au64[3] = pu256Value->au64[3];
9973 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
9974 }
9975 return rc;
9976}
9977
9978
9979#ifdef IEM_WITH_SETJMP
9980/**
9981 * Stores a data oword (octo word), longjmp on error.
9982 *
9983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9984 * @param iSegReg The index of the segment register to use for
9985 * this access. The base and limits are checked.
9986 * @param GCPtrMem The address of the guest memory.
9987 * @param pu256Value Pointer to the value to store.
9988 */
9989IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
9990{
9991 /* The lazy approach for now... */
9992 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9993 pu256Dst->au64[0] = pu256Value->au64[0];
9994 pu256Dst->au64[1] = pu256Value->au64[1];
9995 pu256Dst->au64[2] = pu256Value->au64[2];
9996 pu256Dst->au64[3] = pu256Value->au64[3];
9997 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
9998}
9999#endif
10000
10001
10002/**
10003 * Stores a data oword (octo word), AVX aligned.
10004 *
10005 * @returns Strict VBox status code.
10006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10007 * @param iSegReg The index of the segment register to use for
10008 * this access. The base and limits are checked.
10009 * @param GCPtrMem The address of the guest memory.
10010 * @param pu256Value Pointer to the value to store.
10011 */
10012IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10013{
10014 /* The lazy approach for now... */
10015 if (GCPtrMem & 31)
10016 return iemRaiseGeneralProtectionFault0(pVCpu);
10017
10018 PRTUINT256U pu256Dst;
10019 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10020 if (rc == VINF_SUCCESS)
10021 {
10022 pu256Dst->au64[0] = pu256Value->au64[0];
10023 pu256Dst->au64[1] = pu256Value->au64[1];
10024 pu256Dst->au64[2] = pu256Value->au64[2];
10025 pu256Dst->au64[3] = pu256Value->au64[3];
10026 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10027 }
10028 return rc;
10029}
10030
10031
10032#ifdef IEM_WITH_SETJMP
10033/**
10034 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10035 *
10036 * @returns Strict VBox status code.
10037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10038 * @param iSegReg The index of the segment register to use for
10039 * this access. The base and limits are checked.
10040 * @param GCPtrMem The address of the guest memory.
10041 * @param pu256Value Pointer to the value to store.
10042 */
10043DECL_NO_INLINE(IEM_STATIC, void)
10044iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10045{
10046 /* The lazy approach for now... */
10047 if ((GCPtrMem & 31) == 0)
10048 {
10049 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10050 pu256Dst->au64[0] = pu256Value->au64[0];
10051 pu256Dst->au64[1] = pu256Value->au64[1];
10052 pu256Dst->au64[2] = pu256Value->au64[2];
10053 pu256Dst->au64[3] = pu256Value->au64[3];
10054 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10055 return;
10056 }
10057
10058 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10059 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10060}
10061#endif
10062
10063
10064/**
10065 * Stores a descriptor register (sgdt, sidt).
10066 *
10067 * @returns Strict VBox status code.
10068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10069 * @param cbLimit The limit.
10070 * @param GCPtrBase The base address.
10071 * @param iSegReg The index of the segment register to use for
10072 * this access. The base and limits are checked.
10073 * @param GCPtrMem The address of the guest memory.
10074 */
10075IEM_STATIC VBOXSTRICTRC
10076iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10077{
10078 /*
10079 * The SIDT and SGDT instructions actually store the data using two
10080 * independent writes. The instructions do not respond to opsize prefixes.
10081 */
10082 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10083 if (rcStrict == VINF_SUCCESS)
10084 {
10085 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10086 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10087 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10088 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10089 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10090 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10091 else
10092 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10093 }
10094 return rcStrict;
10095}
10096
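/*
 * Editor's note: the block below is an illustrative sketch only; it is not part of
 * the original source and is excluded from the build.  It shows the byte layout
 * produced by iemMemStoreDataXdtr above for the 16-bit and 32-bit operand sizes:
 * the 16-bit limit is written first, then the 32-bit base, with the 286 quirk of
 * storing the undefined top byte of the base as 0xff.  A little endian host is
 * assumed, and the helper name is made up for the example.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>
# include <string.h>
static void iemExampleEncodeXdtr32(uint8_t *pbMem, uint16_t cbLimit, uint32_t uBase, bool f286Style)
{
    if (f286Style)
        uBase |= UINT32_C(0xff000000);              /* 286: top byte of the base field forced to 0xff */
    memcpy(pbMem,     &cbLimit, sizeof(cbLimit));   /* first write:  bytes 0..1 */
    memcpy(pbMem + 2, &uBase,   sizeof(uBase));     /* second write: bytes 2..5 */
}
#endif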
10097
10098/**
10099 * Pushes a word onto the stack.
10100 *
10101 * @returns Strict VBox status code.
10102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10103 * @param u16Value The value to push.
10104 */
10105IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10106{
10107 /* Decrement the stack pointer. */
10108 uint64_t uNewRsp;
10109 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10110
10111 /* Write the word the lazy way. */
10112 uint16_t *pu16Dst;
10113 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10114 if (rc == VINF_SUCCESS)
10115 {
10116 *pu16Dst = u16Value;
10117 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10118 }
10119
10120 /* Commit the new RSP value unless an access handler made trouble. */
10121 if (rc == VINF_SUCCESS)
10122 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10123
10124 return rc;
10125}
10126
10127
10128/**
10129 * Pushes a dword onto the stack.
10130 *
10131 * @returns Strict VBox status code.
10132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10133 * @param u32Value The value to push.
10134 */
10135IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10136{
10137 /* Decrement the stack pointer. */
10138 uint64_t uNewRsp;
10139 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10140
10141 /* Write the dword the lazy way. */
10142 uint32_t *pu32Dst;
10143 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10144 if (rc == VINF_SUCCESS)
10145 {
10146 *pu32Dst = u32Value;
10147 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10148 }
10149
10150 /* Commit the new RSP value unless an access handler made trouble. */
10151 if (rc == VINF_SUCCESS)
10152 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10153
10154 return rc;
10155}
10156
10157
10158/**
10159 * Pushes a dword segment register value onto the stack.
10160 *
10161 * @returns Strict VBox status code.
10162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10163 * @param u32Value The value to push.
10164 */
10165IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10166{
10167 /* Decrement the stack pointer. */
10168 uint64_t uNewRsp;
10169 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10170
10171 /* The Intel docs talk about zero extending the selector register
10172 value. My actual Intel CPU here might be zero extending the value,
10173 but it still only writes the lower word... */
10174 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10175 * happens when crossing an electric page boundary, is the high word checked
10176 * for write accessibility or not? Probably it is. What about segment limits?
10177 * It appears this behavior is also shared with trap error codes.
10178 *
10179 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10180 * ancient hardware when it actually did change. */
10181 uint16_t *pu16Dst;
10182 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10183 if (rc == VINF_SUCCESS)
10184 {
10185 *pu16Dst = (uint16_t)u32Value;
10186 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10187 }
10188
10189 /* Commit the new RSP value unless an access handler made trouble. */
10190 if (rc == VINF_SUCCESS)
10191 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10192
10193 return rc;
10194}
10195
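/*
 * Editor's note: the block below is an illustrative sketch only; it is not part of
 * the original source and is excluded from the build.  It models the behaviour
 * iemMemStackPushU32SReg implements above: a 32-bit stack slot is reserved, but
 * only the low 16 bits (the selector) are written, leaving the upper half of the
 * slot untouched.  The helper name is made up for the example.
 */
#if 0
# include <stdint.h>
# include <string.h>
static void iemExampleWriteSRegIntoSlot(uint8_t *pbStackSlot /* 4 byte slot */, uint32_t u32Value)
{
    uint16_t const uSel = (uint16_t)u32Value;   /* only the selector word is stored */
    memcpy(pbStackSlot, &uSel, sizeof(uSel));   /* bytes 2..3 of the slot keep their old content */
}
#endif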
10196
10197/**
10198 * Pushes a qword onto the stack.
10199 *
10200 * @returns Strict VBox status code.
10201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10202 * @param u64Value The value to push.
10203 */
10204IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10205{
10206 /* Decrement the stack pointer. */
10207 uint64_t uNewRsp;
10208 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10209
10210 /* Write the qword the lazy way. */
10211 uint64_t *pu64Dst;
10212 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10213 if (rc == VINF_SUCCESS)
10214 {
10215 *pu64Dst = u64Value;
10216 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10217 }
10218
10219 /* Commit the new RSP value unless an access handler made trouble. */
10220 if (rc == VINF_SUCCESS)
10221 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10222
10223 return rc;
10224}
10225
10226
10227/**
10228 * Pops a word from the stack.
10229 *
10230 * @returns Strict VBox status code.
10231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10232 * @param pu16Value Where to store the popped value.
10233 */
10234IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10235{
10236 /* Increment the stack pointer. */
10237 uint64_t uNewRsp;
10238 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10239
10240 /* Read the word the lazy way. */
10241 uint16_t const *pu16Src;
10242 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10243 if (rc == VINF_SUCCESS)
10244 {
10245 *pu16Value = *pu16Src;
10246 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10247
10248 /* Commit the new RSP value. */
10249 if (rc == VINF_SUCCESS)
10250 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10251 }
10252
10253 return rc;
10254}
10255
10256
10257/**
10258 * Pops a dword from the stack.
10259 *
10260 * @returns Strict VBox status code.
10261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10262 * @param pu32Value Where to store the popped value.
10263 */
10264IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10265{
10266 /* Increment the stack pointer. */
10267 uint64_t uNewRsp;
10268 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10269
10270 /* Read the dword the lazy way. */
10271 uint32_t const *pu32Src;
10272 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10273 if (rc == VINF_SUCCESS)
10274 {
10275 *pu32Value = *pu32Src;
10276 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10277
10278 /* Commit the new RSP value. */
10279 if (rc == VINF_SUCCESS)
10280 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10281 }
10282
10283 return rc;
10284}
10285
10286
10287/**
10288 * Pops a qword from the stack.
10289 *
10290 * @returns Strict VBox status code.
10291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10292 * @param pu64Value Where to store the popped value.
10293 */
10294IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10295{
10296 /* Increment the stack pointer. */
10297 uint64_t uNewRsp;
10298 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10299
10300 /* Read the qword the lazy way. */
10301 uint64_t const *pu64Src;
10302 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10303 if (rc == VINF_SUCCESS)
10304 {
10305 *pu64Value = *pu64Src;
10306 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10307
10308 /* Commit the new RSP value. */
10309 if (rc == VINF_SUCCESS)
10310 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10311 }
10312
10313 return rc;
10314}
10315
10316
10317/**
10318 * Pushes a word onto the stack, using a temporary stack pointer.
10319 *
10320 * @returns Strict VBox status code.
10321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10322 * @param u16Value The value to push.
10323 * @param pTmpRsp Pointer to the temporary stack pointer.
10324 */
10325IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10326{
10327 /* Decrement the stack pointer. */
10328 RTUINT64U NewRsp = *pTmpRsp;
10329 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10330
10331 /* Write the word the lazy way. */
10332 uint16_t *pu16Dst;
10333 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10334 if (rc == VINF_SUCCESS)
10335 {
10336 *pu16Dst = u16Value;
10337 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10338 }
10339
10340 /* Commit the new RSP value unless an access handler made trouble. */
10341 if (rc == VINF_SUCCESS)
10342 *pTmpRsp = NewRsp;
10343
10344 return rc;
10345}
10346
10347
10348/**
10349 * Pushes a dword onto the stack, using a temporary stack pointer.
10350 *
10351 * @returns Strict VBox status code.
10352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10353 * @param u32Value The value to push.
10354 * @param pTmpRsp Pointer to the temporary stack pointer.
10355 */
10356IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10357{
10358 /* Decrement the stack pointer. */
10359 RTUINT64U NewRsp = *pTmpRsp;
10360 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10361
10362 /* Write the dword the lazy way. */
10363 uint32_t *pu32Dst;
10364 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10365 if (rc == VINF_SUCCESS)
10366 {
10367 *pu32Dst = u32Value;
10368 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10369 }
10370
10371 /* Commit the new RSP value unless an access handler made trouble. */
10372 if (rc == VINF_SUCCESS)
10373 *pTmpRsp = NewRsp;
10374
10375 return rc;
10376}
10377
10378
10379/**
10380 * Pushes a dword onto the stack, using a temporary stack pointer.
10381 *
10382 * @returns Strict VBox status code.
10383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10384 * @param u64Value The value to push.
10385 * @param pTmpRsp Pointer to the temporary stack pointer.
10386 */
10387IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10388{
10389 /* Decrement the stack pointer. */
10390 RTUINT64U NewRsp = *pTmpRsp;
10391 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10392
10393 /* Write the qword the lazy way. */
10394 uint64_t *pu64Dst;
10395 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10396 if (rc == VINF_SUCCESS)
10397 {
10398 *pu64Dst = u64Value;
10399 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10400 }
10401
10402 /* Commit the new RSP value unless an access handler made trouble. */
10403 if (rc == VINF_SUCCESS)
10404 *pTmpRsp = NewRsp;
10405
10406 return rc;
10407}
10408
10409
10410/**
10411 * Pops a word from the stack, using a temporary stack pointer.
10412 *
10413 * @returns Strict VBox status code.
10414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10415 * @param pu16Value Where to store the popped value.
10416 * @param pTmpRsp Pointer to the temporary stack pointer.
10417 */
10418IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10419{
10420 /* Increment the stack pointer. */
10421 RTUINT64U NewRsp = *pTmpRsp;
10422 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10423
10424 /* Read the word the lazy way. */
10425 uint16_t const *pu16Src;
10426 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10427 if (rc == VINF_SUCCESS)
10428 {
10429 *pu16Value = *pu16Src;
10430 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10431
10432 /* Commit the new RSP value. */
10433 if (rc == VINF_SUCCESS)
10434 *pTmpRsp = NewRsp;
10435 }
10436
10437 return rc;
10438}
10439
10440
10441/**
10442 * Pops a dword from the stack, using a temporary stack pointer.
10443 *
10444 * @returns Strict VBox status code.
10445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10446 * @param pu32Value Where to store the popped value.
10447 * @param pTmpRsp Pointer to the temporary stack pointer.
10448 */
10449IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10450{
10451 /* Increment the stack pointer. */
10452 RTUINT64U NewRsp = *pTmpRsp;
10453 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10454
10455 /* Read the dword the lazy way. */
10456 uint32_t const *pu32Src;
10457 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10458 if (rc == VINF_SUCCESS)
10459 {
10460 *pu32Value = *pu32Src;
10461 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10462
10463 /* Commit the new RSP value. */
10464 if (rc == VINF_SUCCESS)
10465 *pTmpRsp = NewRsp;
10466 }
10467
10468 return rc;
10469}
10470
10471
10472/**
10473 * Pops a qword from the stack, using a temporary stack pointer.
10474 *
10475 * @returns Strict VBox status code.
10476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10477 * @param pu64Value Where to store the popped value.
10478 * @param pTmpRsp Pointer to the temporary stack pointer.
10479 */
10480IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10481{
10482 /* Increment the stack pointer. */
10483 RTUINT64U NewRsp = *pTmpRsp;
10484 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10485
10486 /* Read the qword the lazy way. */
10487 uint64_t const *pu64Src;
10488 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10489 if (rcStrict == VINF_SUCCESS)
10490 {
10491 *pu64Value = *pu64Src;
10492 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10493
10494 /* Commit the new RSP value. */
10495 if (rcStrict == VINF_SUCCESS)
10496 *pTmpRsp = NewRsp;
10497 }
10498
10499 return rcStrict;
10500}
10501
10502
10503/**
10504 * Begin a special stack push (used by interrupt, exceptions and such).
10505 *
10506 * This will raise \#SS or \#PF if appropriate.
10507 *
10508 * @returns Strict VBox status code.
10509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10510 * @param cbMem The number of bytes to push onto the stack.
10511 * @param ppvMem Where to return the pointer to the stack memory.
10512 * As with the other memory functions this could be
10513 * direct access or bounce buffered access, so
10514 * don't commit the register until the commit call
10515 * succeeds.
10516 * @param puNewRsp Where to return the new RSP value. This must be
10517 * passed unchanged to
10518 * iemMemStackPushCommitSpecial().
10519 */
10520IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10521{
10522 Assert(cbMem < UINT8_MAX);
10523 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10524 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10525}
10526
10527
10528/**
10529 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10530 *
10531 * This will update the rSP.
10532 *
10533 * @returns Strict VBox status code.
10534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10535 * @param pvMem The pointer returned by
10536 * iemMemStackPushBeginSpecial().
10537 * @param uNewRsp The new RSP value returned by
10538 * iemMemStackPushBeginSpecial().
10539 */
10540IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10541{
10542 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10543 if (rcStrict == VINF_SUCCESS)
10544 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10545 return rcStrict;
10546}
10547
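/*
 * Editor's note: the block below is an illustrative sketch only; it is not part of
 * the original source and is excluded from the build.  It shows how a caller is
 * expected to pair iemMemStackPushBeginSpecial with iemMemStackPushCommitSpecial,
 * here for a hypothetical 16-bit error code push: the data is written into the
 * returned mapping and RSP is only committed by the second call.  The helper name
 * and scenario are made up for the example.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExamplePushErrorCodeU16(PVMCPU pVCpu, uint16_t uErr)
{
    void    *pvMem;
    uint64_t uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint16_t), &pvMem, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                                        /* #SS/#PF raised in Begin */
    *(uint16_t *)pvMem = uErr;                                  /* fill the (possibly bounce buffered) memory */
    return iemMemStackPushCommitSpecial(pVCpu, pvMem, uNewRsp); /* commits both the data and RSP on success */
}
#endif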
10548
10549/**
10550 * Begin a special stack pop (used by iret, retf and such).
10551 *
10552 * This will raise \#SS or \#PF if appropriate.
10553 *
10554 * @returns Strict VBox status code.
10555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10556 * @param cbMem The number of bytes to pop from the stack.
10557 * @param ppvMem Where to return the pointer to the stack memory.
10558 * @param puNewRsp Where to return the new RSP value. This must be
10559 * assigned to CPUMCTX::rsp manually some time
10560 * after iemMemStackPopDoneSpecial() has been
10561 * called.
10562 */
10563IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10564{
10565 Assert(cbMem < UINT8_MAX);
10566 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10567 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10568}
10569
10570
10571/**
10572 * Continue a special stack pop (used by iret and retf).
10573 *
10574 * This will raise \#SS or \#PF if appropriate.
10575 *
10576 * @returns Strict VBox status code.
10577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10578 * @param cbMem The number of bytes to pop from the stack.
10579 * @param ppvMem Where to return the pointer to the stack memory.
10580 * @param puNewRsp Where to return the new RSP value. This must be
10581 * assigned to CPUMCTX::rsp manually some time
10582 * after iemMemStackPopDoneSpecial() has been
10583 * called.
10584 */
10585IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10586{
10587 Assert(cbMem < UINT8_MAX);
10588 RTUINT64U NewRsp;
10589 NewRsp.u = *puNewRsp;
10590 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10591 *puNewRsp = NewRsp.u;
10592 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10593}
10594
10595
10596/**
10597 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10598 * iemMemStackPopContinueSpecial).
10599 *
10600 * The caller will manually commit the rSP.
10601 *
10602 * @returns Strict VBox status code.
10603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10604 * @param pvMem The pointer returned by
10605 * iemMemStackPopBeginSpecial() or
10606 * iemMemStackPopContinueSpecial().
10607 */
10608IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10609{
10610 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10611}
10612
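/*
 * Editor's note: the block below is an illustrative sketch only; it is not part of
 * the original source and is excluded from the build.  It shows the expected
 * pairing of iemMemStackPopBeginSpecial and iemMemStackPopDoneSpecial: the data is
 * read from the mapping, the mapping is released, and only then does the caller
 * commit the new RSP manually.  The helper name and scenario are made up for the
 * example.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExamplePopU64Special(PVMCPU pVCpu, uint64_t *pu64Value)
{
    void const *pvMem;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint64_t), &pvMem, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                                    /* #SS/#PF raised in Begin */
    *pu64Value = *(uint64_t const *)pvMem;                  /* consume the mapped data */
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvMem);     /* release the mapping */
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;                   /* the caller commits RSP itself */
    return rcStrict;
}
#endif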
10613
10614/**
10615 * Fetches a system table byte.
10616 *
10617 * @returns Strict VBox status code.
10618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10619 * @param pbDst Where to return the byte.
10620 * @param iSegReg The index of the segment register to use for
10621 * this access. The base and limits are checked.
10622 * @param GCPtrMem The address of the guest memory.
10623 */
10624IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10625{
10626 /* The lazy approach for now... */
10627 uint8_t const *pbSrc;
10628 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10629 if (rc == VINF_SUCCESS)
10630 {
10631 *pbDst = *pbSrc;
10632 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10633 }
10634 return rc;
10635}
10636
10637
10638/**
10639 * Fetches a system table word.
10640 *
10641 * @returns Strict VBox status code.
10642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10643 * @param pu16Dst Where to return the word.
10644 * @param iSegReg The index of the segment register to use for
10645 * this access. The base and limits are checked.
10646 * @param GCPtrMem The address of the guest memory.
10647 */
10648IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10649{
10650 /* The lazy approach for now... */
10651 uint16_t const *pu16Src;
10652 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10653 if (rc == VINF_SUCCESS)
10654 {
10655 *pu16Dst = *pu16Src;
10656 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10657 }
10658 return rc;
10659}
10660
10661
10662/**
10663 * Fetches a system table dword.
10664 *
10665 * @returns Strict VBox status code.
10666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10667 * @param pu32Dst Where to return the dword.
10668 * @param iSegReg The index of the segment register to use for
10669 * this access. The base and limits are checked.
10670 * @param GCPtrMem The address of the guest memory.
10671 */
10672IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10673{
10674 /* The lazy approach for now... */
10675 uint32_t const *pu32Src;
10676 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10677 if (rc == VINF_SUCCESS)
10678 {
10679 *pu32Dst = *pu32Src;
10680 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10681 }
10682 return rc;
10683}
10684
10685
10686/**
10687 * Fetches a system table qword.
10688 *
10689 * @returns Strict VBox status code.
10690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10691 * @param pu64Dst Where to return the qword.
10692 * @param iSegReg The index of the segment register to use for
10693 * this access. The base and limits are checked.
10694 * @param GCPtrMem The address of the guest memory.
10695 */
10696IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10697{
10698 /* The lazy approach for now... */
10699 uint64_t const *pu64Src;
10700 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10701 if (rc == VINF_SUCCESS)
10702 {
10703 *pu64Dst = *pu64Src;
10704 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10705 }
10706 return rc;
10707}
10708
10709
10710/**
10711 * Fetches a descriptor table entry with caller specified error code.
10712 *
10713 * @returns Strict VBox status code.
10714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10715 * @param pDesc Where to return the descriptor table entry.
10716 * @param uSel The selector which table entry to fetch.
10717 * @param uXcpt The exception to raise on table lookup error.
10718 * @param uErrorCode The error code associated with the exception.
10719 */
10720IEM_STATIC VBOXSTRICTRC
10721iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10722{
10723 AssertPtr(pDesc);
10724 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10725
10726 /** @todo did the 286 require all 8 bytes to be accessible? */
10727 /*
10728 * Get the selector table base and check bounds.
10729 */
10730 RTGCPTR GCPtrBase;
10731 if (uSel & X86_SEL_LDT)
10732 {
10733 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10734 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10735 {
10736 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10737 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10738 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10739 uErrorCode, 0);
10740 }
10741
10742 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10743 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10744 }
10745 else
10746 {
10747 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10748 {
10749 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10750 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10751 uErrorCode, 0);
10752 }
10753 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10754 }
10755
10756 /*
10757 * Read the legacy descriptor and maybe the long mode extensions if
10758 * required.
10759 */
10760 VBOXSTRICTRC rcStrict;
10761 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10762 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10763 else
10764 {
10765 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10766 if (rcStrict == VINF_SUCCESS)
10767 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10768 if (rcStrict == VINF_SUCCESS)
10769 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10770 if (rcStrict == VINF_SUCCESS)
10771 pDesc->Legacy.au16[3] = 0;
10772 else
10773 return rcStrict;
10774 }
10775
10776 if (rcStrict == VINF_SUCCESS)
10777 {
10778 if ( !IEM_IS_LONG_MODE(pVCpu)
10779 || pDesc->Legacy.Gen.u1DescType)
10780 pDesc->Long.au64[1] = 0;
10781 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10782 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10783 else
10784 {
10785 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10786 /** @todo is this the right exception? */
10787 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10788 }
10789 }
10790 return rcStrict;
10791}
10792
10793
10794/**
10795 * Fetches a descriptor table entry.
10796 *
10797 * @returns Strict VBox status code.
10798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10799 * @param pDesc Where to return the descriptor table entry.
10800 * @param uSel The selector which table entry to fetch.
10801 * @param uXcpt The exception to raise on table lookup error.
10802 */
10803IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10804{
10805 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10806}
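/**
 * @par Usage sketch
 * A minimal sketch of how the descriptor fetchers above are typically used by
 * the C instruction implementations; uNewCs stands in for a selector taken
 * from the instruction operands and is not defined here:
 * @code
 *      IEMSELDESC   DescCs;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP); // uNewCs is a placeholder
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 * @endcode
 * The caller then validates DescCs (present, type, DPL and so on) before
 * actually loading the segment register.
 */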
10807
10808
10809/**
10810 * Fakes a long mode stack selector for SS = 0.
10811 *
10812 * @param pDescSs Where to return the fake stack descriptor.
10813 * @param uDpl The DPL we want.
10814 */
10815IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10816{
10817 pDescSs->Long.au64[0] = 0;
10818 pDescSs->Long.au64[1] = 0;
10819 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10820 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10821 pDescSs->Long.Gen.u2Dpl = uDpl;
10822 pDescSs->Long.Gen.u1Present = 1;
10823 pDescSs->Long.Gen.u1Long = 1;
10824}
10825
10826
10827/**
10828 * Marks the selector descriptor as accessed (only non-system descriptors).
10829 *
10830 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10831 * will therefore skip the limit checks.
10832 *
10833 * @returns Strict VBox status code.
10834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10835 * @param uSel The selector.
10836 */
10837IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10838{
10839 /*
10840 * Get the selector table base and calculate the entry address.
10841 */
10842 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10843 ? pVCpu->cpum.GstCtx.ldtr.u64Base
10844 : pVCpu->cpum.GstCtx.gdtr.pGdt;
10845 GCPtr += uSel & X86_SEL_MASK;
10846
10847 /*
10848 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10849 * ugly stuff to avoid this. This makes sure the access is atomic and
10850 * more or less removes any question about 8-bit vs. 32-bit accesses.
10851 */
10852 VBOXSTRICTRC rcStrict;
10853 uint32_t volatile *pu32;
10854 if ((GCPtr & 3) == 0)
10855 {
10856 /* The normal case: map the 32 bits surrounding the accessed bit (bit 40). */
10857 GCPtr += 2 + 2;
10858 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10859 if (rcStrict != VINF_SUCCESS)
10860 return rcStrict;
10861 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10862 }
10863 else
10864 {
10865 /* The misaligned GDT/LDT case, map the whole thing. */
10866 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10867 if (rcStrict != VINF_SUCCESS)
10868 return rcStrict;
10869 switch ((uintptr_t)pu32 & 3)
10870 {
10871 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10872 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10873 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10874 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10875 }
10876 }
10877
10878 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10879}
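/**
 * @par Usage sketch
 * The accessed bit is normally only set when it isn't set already, and the
 * local descriptor copy is updated to match; DescCs and uNewCs stand in for
 * values from the surrounding instruction implementation:
 * @code
 *      if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *      {
 *          rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs); // uNewCs is a placeholder
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *          DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;   // keep the local copy in sync
 *      }
 * @endcode
 */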
10880
10881/** @} */
10882
10883
10884/*
10885 * Include the C/C++ implementation of instruction.
10886 */
10887#include "IEMAllCImpl.cpp.h"
10888
10889
10890
10891/** @name "Microcode" macros.
10892 *
10893 * The idea is that we should be able to use the same code to interpret
10894 * instructions as well as to feed a recompiler. Thus this obfuscation.
10895 *
10896 * @{
10897 */
10898#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10899#define IEM_MC_END() }
10900#define IEM_MC_PAUSE() do {} while (0)
10901#define IEM_MC_CONTINUE() do {} while (0)
10902
10903/** Internal macro. */
10904#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10905 do \
10906 { \
10907 VBOXSTRICTRC rcStrict2 = a_Expr; \
10908 if (rcStrict2 != VINF_SUCCESS) \
10909 return rcStrict2; \
10910 } while (0)
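/**
 * @par Usage sketch
 * The decoder strings the IEM_MC_XXX macros together into small "microcode"
 * blocks, one per instruction form.  A register-to-register 16-bit move
 * could, roughly, look like this (the register indices are placeholders, not
 * taken from any particular opcode):
 * @code
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);   // placeholder source register
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xBX, u16Value);   // placeholder destination register
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 * @endcode
 */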
10911
10912
10913#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10914#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10915#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10916#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10917#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10918#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10919#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10920#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10921#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10922 do { \
10923 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10924 return iemRaiseDeviceNotAvailable(pVCpu); \
10925 } while (0)
10926#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
10927 do { \
10928 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
10929 return iemRaiseDeviceNotAvailable(pVCpu); \
10930 } while (0)
10931#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10932 do { \
10933 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10934 return iemRaiseMathFault(pVCpu); \
10935 } while (0)
10936#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
10937 do { \
10938 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
10939 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
10940 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
10941 return iemRaiseUndefinedOpcode(pVCpu); \
10942 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10943 return iemRaiseDeviceNotAvailable(pVCpu); \
10944 } while (0)
10945#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
10946 do { \
10947 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
10948 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
10949 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
10950 return iemRaiseUndefinedOpcode(pVCpu); \
10951 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10952 return iemRaiseDeviceNotAvailable(pVCpu); \
10953 } while (0)
10954#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
10955 do { \
10956 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
10957 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
10958 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
10959 return iemRaiseUndefinedOpcode(pVCpu); \
10960 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10961 return iemRaiseDeviceNotAvailable(pVCpu); \
10962 } while (0)
10963#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
10964 do { \
10965 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
10966 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
10967 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
10968 return iemRaiseUndefinedOpcode(pVCpu); \
10969 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10970 return iemRaiseDeviceNotAvailable(pVCpu); \
10971 } while (0)
10972#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10973 do { \
10974 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
10975 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
10976 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10977 return iemRaiseUndefinedOpcode(pVCpu); \
10978 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10979 return iemRaiseDeviceNotAvailable(pVCpu); \
10980 } while (0)
10981#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10982 do { \
10983 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
10984 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
10985 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10986 return iemRaiseUndefinedOpcode(pVCpu); \
10987 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10988 return iemRaiseDeviceNotAvailable(pVCpu); \
10989 } while (0)
10990#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10991 do { \
10992 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
10993 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10994 return iemRaiseUndefinedOpcode(pVCpu); \
10995 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
10996 return iemRaiseDeviceNotAvailable(pVCpu); \
10997 } while (0)
10998#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10999 do { \
11000 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11001 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11002 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11003 return iemRaiseUndefinedOpcode(pVCpu); \
11004 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11005 return iemRaiseDeviceNotAvailable(pVCpu); \
11006 } while (0)
11007#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11008 do { \
11009 if (pVCpu->iem.s.uCpl != 0) \
11010 return iemRaiseGeneralProtectionFault0(pVCpu); \
11011 } while (0)
11012#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11013 do { \
11014 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11015 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11016 } while (0)
11017#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11018 do { \
11019 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11020 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11021 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11022 return iemRaiseUndefinedOpcode(pVCpu); \
11023 } while (0)
11024#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11025 do { \
11026 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11027 return iemRaiseGeneralProtectionFault0(pVCpu); \
11028 } while (0)
11029
11030
11031#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11032#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11033#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11034#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11035#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11036#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11037#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11038 uint32_t a_Name; \
11039 uint32_t *a_pName = &a_Name
11040#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11041 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11042
11043#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11044#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11045
11046#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11047#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11048#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11049#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11050#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11051#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11052#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11053#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11054#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11055#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11056#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11057#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11058#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11059#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11060#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11061#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11062#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11063#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11064 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11065 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11066 } while (0)
11067#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11068 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11069 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11070 } while (0)
11071#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11072 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11073 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11074 } while (0)
11075/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11076#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11077 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11078 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11079 } while (0)
11080#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11081 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11082 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11083 } while (0)
11084/** @note Not for IOPL or IF testing or modification. */
11085#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11086#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11087#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11088#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11089
11090#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11091#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11092#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11093#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11094#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11095#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11096#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11097#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11098#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11099#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11100/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11101#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11102 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11103 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11104 } while (0)
11105#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11106 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11107 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11108 } while (0)
11109#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11110 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11111
11112
11113#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11114#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11115/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11116 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11117#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11118#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11119/** @note Not for IOPL or IF testing or modification. */
11120#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
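/**
 * @par Usage sketch
 * The reference macros above are how binary ALU instructions hand a register
 * and EFLAGS to an assembly worker; a register-form 32-bit ADD could be
 * sketched like this (iemAImpl_add_u32 is one of the existing workers, the
 * register indices are placeholders):
 * @code
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *      IEM_MC_ARG(uint32_t,   u32Src,  1);
 *      IEM_MC_ARG(uint32_t *, pEFlags, 2);
 *      IEM_MC_FETCH_GREG_U32(u32Src, X86_GREG_xCX);     // placeholder source register
 *      IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);      // placeholder destination register
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 * Note how IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF follows the worker call, as the
 * todo note above for IEM_MC_REF_GREG_U32 requests.
 */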
11121
11122#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11123#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11124#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11125 do { \
11126 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11127 *pu32Reg += (a_u32Value); \
11128 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11129 } while (0)
11130#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11131
11132#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11133#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11134#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11135 do { \
11136 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11137 *pu32Reg -= (a_u32Value); \
11138 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11139 } while (0)
11140#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11141#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11142
11143#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11144#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11145#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11146#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11147#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11148#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11149#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11150
11151#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11152#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11153#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11154#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11155
11156#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11157#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11158#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11159
11160#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11161#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11162#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11163
11164#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11165#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11166#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11167
11168#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11169#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11170#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11171
11172#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11173
11174#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11175
11176#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11177#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11178#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11179 do { \
11180 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11181 *pu32Reg &= (a_u32Value); \
11182 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11183 } while (0)
11184#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11185
11186#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11187#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11188#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11189 do { \
11190 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11191 *pu32Reg |= (a_u32Value); \
11192 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11193 } while (0)
11194#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11195
11196
11197/** @note Not for IOPL or IF modification. */
11198#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11199/** @note Not for IOPL or IF modification. */
11200#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11201/** @note Not for IOPL or IF modification. */
11202#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11203
11204#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11205
11206/** Switches the FPU state to MMX mode (FSW.TOS=0, all tags valid, i.e. abridged FTW=0xff) if necessary. */
11207#define IEM_MC_FPU_TO_MMX_MODE() do { \
11208 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11209 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11210 } while (0)
11211
11212/** Switches the FPU state out of MMX mode (all tags empty, i.e. abridged FTW=0). */
11213#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11214 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11215 } while (0)
11216
11217#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11218 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11219#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11220 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11221#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11222 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11223 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11224 } while (0)
11225#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11226 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11227 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11228 } while (0)
11229#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11230 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11231#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11232 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11233#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11234 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
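/**
 * @par Usage sketch
 * MMX register moves combine the exception check, the MREG accessors and the
 * MMX mode switch; a register-form MOVQ could be sketched as below (the
 * register indices are placeholders, and the real decoder bodies also take
 * care of lazily loading/actualizing the FPU state):
 * @code
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint64_t, u64Tmp);
 *      IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
 *      IEM_MC_FETCH_MREG_U64(u64Tmp, 1);    // placeholder source MMX register
 *      IEM_MC_STORE_MREG_U64(0, u64Tmp);    // placeholder destination MMX register
 *      IEM_MC_FPU_TO_MMX_MODE();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */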
11235
11236#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11237 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11238 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11239 } while (0)
11240#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11241 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11242#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11243 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11244#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11245 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11246#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11247 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11248 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11249 } while (0)
11250#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11251 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11252#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11253 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11254 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11255 } while (0)
11256#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11257 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11258#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11259 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11260 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11261 } while (0)
11262#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11263 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11264#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11265 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11266#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11267 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11268#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11269 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11270#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11271 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11272 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11273 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11274 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11275 } while (0)
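/**
 * @par Usage sketch
 * The XREG accessors pair up with the SSE exception checks further up; a
 * register-form MOVAPS boils down to a single copy (the register indices are
 * placeholders, and the real decoder bodies also actualize the SSE state):
 * @code
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *      IEM_MC_COPY_XREG_U128(0, 1);         // placeholder destination and source indices
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */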
11276
11277#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11278 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11279 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11280 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11281 } while (0)
11282#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11283 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11284 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11285 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11286 } while (0)
11287#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11288 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11289 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11290 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11291 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11292 } while (0)
11293#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11294 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11295 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11296 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11297 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11298 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11299 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11300 } while (0)
11301
11302#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11303#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11304 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11305 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11306 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11307 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11308 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11309 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11310 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11311 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11312 } while (0)
11313#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11314 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11315 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11316 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11317 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11318 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11319 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11320 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11321 } while (0)
11322#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11323 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11324 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11325 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11326 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11327 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11328 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11329 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11330 } while (0)
11331#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11332 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11333 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11334 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11335 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11336 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11337 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11338 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11339 } while (0)
11340
11341#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11342 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11343#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11344 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11345#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11346 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11347#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11348 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11349 uintptr_t const iYRegTmp = (a_iYReg); \
11350 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11351 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11352 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11353 } while (0)
11354
11355#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11356 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11357 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11358 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11359 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11360 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11361 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11362 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11363 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11364 } while (0)
11365#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11366 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11367 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11368 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11369 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11370 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11371 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11372 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11373 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11374 } while (0)
11375#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11376 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11377 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11378 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11379 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11380 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11381 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11382 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11383 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11384 } while (0)
11385
11386#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11387 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11388 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11389 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11390 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11391 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11392 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11393 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11394 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11395 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11396 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11397 } while (0)
11398#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11399 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11400 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11401 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11402 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11403 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11404 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11405 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11406 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11407 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11408 } while (0)
11409#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11410 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11411 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11412 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11413 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11414 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11415 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11416 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11417 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11418 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11419 } while (0)
11420#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11421 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11422 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11423 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11424 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11425 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11426 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11427 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11428 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11429 } while (0)
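/**
 * @par Usage sketch
 * The VLMAX store/copy/merge macros implement the VEX rule that a 128-bit
 * write also zeroes the upper part of the full-width register; a 128-bit
 * register-form VMOVAPS could thus be sketched as follows (the indices are
 * placeholders, state actualization again omitted):
 * @code
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
 *      IEM_MC_COPY_YREG_U128_ZX_VLMAX(0, 1);    // placeholder destination and source indices
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */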
11430
11431#ifndef IEM_WITH_SETJMP
11432# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11433 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11434# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11435 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11436# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11437 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11438#else
11439# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11440 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11441# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11442 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11443# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11444 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11445#endif
11446
11447#ifndef IEM_WITH_SETJMP
11448# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11449 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11450# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11451 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11452# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11453 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11454#else
11455# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11456 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11457# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11458 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11459# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11460 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11461#endif
11462
11463#ifndef IEM_WITH_SETJMP
11464# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11465 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11466# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11467 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11468# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11469 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11470#else
11471# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11472 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11473# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11474 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11475# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11476 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11477#endif
11478
11479#ifdef SOME_UNUSED_FUNCTION
11480# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11481 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11482#endif
11483
11484#ifndef IEM_WITH_SETJMP
11485# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11486 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11487# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11488 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11489# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11490 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11491# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11492 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11493#else
11494# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11495 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11496# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11497 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11498# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11499 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11500# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11501 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11502#endif
11503
11504#ifndef IEM_WITH_SETJMP
11505# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11506 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11507# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11508 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11509# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11510 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11511#else
11512# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11513 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11514# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11515 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11516# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11517 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11518#endif
11519
11520#ifndef IEM_WITH_SETJMP
11521# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11522 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11523# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11524 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11525#else
11526# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11527 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11528# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11529 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11530#endif
11531
11532#ifndef IEM_WITH_SETJMP
11533# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11534 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11535# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11536 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11537#else
11538# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11539 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11540# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11541 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11542#endif
11543
11544
11545
11546#ifndef IEM_WITH_SETJMP
11547# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11548 do { \
11549 uint8_t u8Tmp; \
11550 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11551 (a_u16Dst) = u8Tmp; \
11552 } while (0)
11553# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11554 do { \
11555 uint8_t u8Tmp; \
11556 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11557 (a_u32Dst) = u8Tmp; \
11558 } while (0)
11559# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11560 do { \
11561 uint8_t u8Tmp; \
11562 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11563 (a_u64Dst) = u8Tmp; \
11564 } while (0)
11565# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11566 do { \
11567 uint16_t u16Tmp; \
11568 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11569 (a_u32Dst) = u16Tmp; \
11570 } while (0)
11571# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11572 do { \
11573 uint16_t u16Tmp; \
11574 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11575 (a_u64Dst) = u16Tmp; \
11576 } while (0)
11577# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11578 do { \
11579 uint32_t u32Tmp; \
11580 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11581 (a_u64Dst) = u32Tmp; \
11582 } while (0)
11583#else /* IEM_WITH_SETJMP */
11584# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11585 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11586# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11587 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11588# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11589 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11590# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11591 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11592# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11593 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11594# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11595 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11596#endif /* IEM_WITH_SETJMP */
11597
11598#ifndef IEM_WITH_SETJMP
11599# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11600 do { \
11601 uint8_t u8Tmp; \
11602 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11603 (a_u16Dst) = (int8_t)u8Tmp; \
11604 } while (0)
11605# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11606 do { \
11607 uint8_t u8Tmp; \
11608 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11609 (a_u32Dst) = (int8_t)u8Tmp; \
11610 } while (0)
11611# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11612 do { \
11613 uint8_t u8Tmp; \
11614 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11615 (a_u64Dst) = (int8_t)u8Tmp; \
11616 } while (0)
11617# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11618 do { \
11619 uint16_t u16Tmp; \
11620 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11621 (a_u32Dst) = (int16_t)u16Tmp; \
11622 } while (0)
11623# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11624 do { \
11625 uint16_t u16Tmp; \
11626 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11627 (a_u64Dst) = (int16_t)u16Tmp; \
11628 } while (0)
11629# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11630 do { \
11631 uint32_t u32Tmp; \
11632 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11633 (a_u64Dst) = (int32_t)u32Tmp; \
11634 } while (0)
11635#else /* IEM_WITH_SETJMP */
11636# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11637 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11638# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11639 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11640# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11641 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11642# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11643 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11644# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11645 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11646# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11647 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11648#endif /* IEM_WITH_SETJMP */
11649
11650#ifndef IEM_WITH_SETJMP
11651# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11652 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11653# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11654 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11655# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11656 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11657# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11658 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11659#else
11660# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11661 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11662# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11663 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11664# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11665 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11666# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11667 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11668#endif
11669
11670#ifndef IEM_WITH_SETJMP
11671# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11672 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11673# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11674 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11675# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11676 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11677# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11678 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11679#else
11680# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11681 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11682# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11683 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11684# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11685 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11686# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11687 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11688#endif
11689
11690#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11691#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11692#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11693#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11694#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11695#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11696#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11697 do { \
11698 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11699 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11700 } while (0)
11701
11702#ifndef IEM_WITH_SETJMP
11703# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11704 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11705# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11706 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11707#else
11708# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11709 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11710# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11711 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11712#endif
11713
11714#ifndef IEM_WITH_SETJMP
11715# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11716 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11717# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11718 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11719#else
11720# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11721 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11722# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11723 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11724#endif
11725
11726
11727#define IEM_MC_PUSH_U16(a_u16Value) \
11728 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11729#define IEM_MC_PUSH_U32(a_u32Value) \
11730 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11731#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11732 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11733#define IEM_MC_PUSH_U64(a_u64Value) \
11734 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11735
11736#define IEM_MC_POP_U16(a_pu16Value) \
11737 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11738#define IEM_MC_POP_U32(a_pu32Value) \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11740#define IEM_MC_POP_U64(a_pu64Value) \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
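/**
 * @par Usage sketch
 * A PUSH of a general register is just a fetch followed by one of the stack
 * push macros above (the register index is a placeholder):
 * @code
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_FETCH_GREG_U32(u32Value, X86_GREG_xAX);   // placeholder source register
 *      IEM_MC_PUSH_U32(u32Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */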
11742
11743/** Maps guest memory for direct or bounce buffered access.
11744 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11745 * @remarks May return.
11746 */
11747#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11748 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11749
11750/** Maps guest memory for direct or bounce buffered access.
11751 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11752 * @remarks May return.
11753 */
11754#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11755 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11756
11757/** Commits the memory and unmaps the guest memory.
11758 * @remarks May return.
11759 */
11760#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11761 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
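/**
 * @par Usage sketch
 * The map/commit pair is how read-modify-write memory operands are handled,
 * for instance a memory-destination 32-bit ADD; bRm comes from the decoder
 * and the source register index is a placeholder:
 * @code
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint32_t *,   pu32Dst,         0);
 *      IEM_MC_ARG(uint32_t,     u32Src,          1);
 *      IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);                                     // bRm comes from the decoder
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_GREG_U32(u32Src, X86_GREG_xAX);                                      // placeholder source register
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */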
11762
11763/** Commits the memory and unmaps the guest memory unless the FPU status word
11764 * (@a a_u16FSW) indicates an exception that the FPU control word leaves
11765 * unmasked and that would therefore prevent the store.
11766 *
11767 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11768 * store, while \#P will not.
11769 *
11770 * @remarks May in theory return - for now.
11771 */
11772#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11773 do { \
11774 if ( !(a_u16FSW & X86_FSW_ES) \
11775 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11776 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11777 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11778 } while (0)
11779
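/* Illustrative sketch (not lifted from the opcode tables): a read-modify-write
 * memory operand is typically handled by mapping the guest memory, handing the
 * pointer to the worker, and committing the result, along the lines of
 *
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnWorkerU32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *
 * where pu32Dst, u32Src, pEFlags, GCPtrEffDst and pfnWorkerU32 stand for
 * IEM_MC_ARG/IEM_MC_LOCAL declarations made earlier in the instruction body.
 */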
11780/** Calculate the effective address from ModR/M. */
11781#ifndef IEM_WITH_SETJMP
11782# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11783 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11784#else
11785# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11786 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11787#endif
11788
11789#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11790#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11791#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11792#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11793#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11794#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11795#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11796
11797/**
11798 * Defers the rest of the instruction emulation to a C implementation routine
11799 * and returns, only taking the standard parameters.
11800 *
11801 * @param a_pfnCImpl The pointer to the C routine.
11802 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11803 */
11804#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11805
11806/**
11807 * Defers the rest of instruction emulation to a C implementation routine and
11808 * returns, taking one argument in addition to the standard ones.
11809 *
11810 * @param a_pfnCImpl The pointer to the C routine.
11811 * @param a0 The argument.
11812 */
11813#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11814
11815/**
11816 * Defers the rest of the instruction emulation to a C implementation routine
11817 * and returns, taking two arguments in addition to the standard ones.
11818 *
11819 * @param a_pfnCImpl The pointer to the C routine.
11820 * @param a0 The first extra argument.
11821 * @param a1 The second extra argument.
11822 */
11823#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11824
11825/**
11826 * Defers the rest of the instruction emulation to a C implementation routine
11827 * and returns, taking three arguments in addition to the standard ones.
11828 *
11829 * @param a_pfnCImpl The pointer to the C routine.
11830 * @param a0 The first extra argument.
11831 * @param a1 The second extra argument.
11832 * @param a2 The third extra argument.
11833 */
11834#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11835
11836/**
11837 * Defers the rest of the instruction emulation to a C implementation routine
11838 * and returns, taking four arguments in addition to the standard ones.
11839 *
11840 * @param a_pfnCImpl The pointer to the C routine.
11841 * @param a0 The first extra argument.
11842 * @param a1 The second extra argument.
11843 * @param a2 The third extra argument.
11844 * @param a3 The fourth extra argument.
11845 */
11846#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11847
11848/**
11849 * Defers the rest of the instruction emulation to a C implementation routine
11850 * and returns, taking five arguments in addition to the standard ones.
11851 *
11852 * @param a_pfnCImpl The pointer to the C routine.
11853 * @param a0 The first extra argument.
11854 * @param a1 The second extra argument.
11855 * @param a2 The third extra argument.
11856 * @param a3 The fourth extra argument.
11857 * @param a4 The fifth extra argument.
11858 */
11859#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11860
11861/**
11862 * Defers the entire instruction emulation to a C implementation routine and
11863 * returns, only taking the standard parameters.
11864 *
11865 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11866 *
11867 * @param a_pfnCImpl The pointer to the C routine.
11868 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11869 */
11870#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11871
11872/**
11873 * Defers the entire instruction emulation to a C implementation routine and
11874 * returns, taking one argument in addition to the standard ones.
11875 *
11876 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11877 *
11878 * @param a_pfnCImpl The pointer to the C routine.
11879 * @param a0 The argument.
11880 */
11881#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11882
11883/**
11884 * Defers the entire instruction emulation to a C implementation routine and
11885 * returns, taking two arguments in addition to the standard ones.
11886 *
11887 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11888 *
11889 * @param a_pfnCImpl The pointer to the C routine.
11890 * @param a0 The first extra argument.
11891 * @param a1 The second extra argument.
11892 */
11893#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11894
11895/**
11896 * Defers the entire instruction emulation to a C implementation routine and
11897 * returns, taking three arguments in addition to the standard ones.
11898 *
11899 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11900 *
11901 * @param a_pfnCImpl The pointer to the C routine.
11902 * @param a0 The first extra argument.
11903 * @param a1 The second extra argument.
11904 * @param a2 The third extra argument.
11905 */
11906#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11907
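/* Illustrative sketch: an instruction that needs no micro-op block at all can
 * simply tail-call its C worker from the decoder function, e.g.
 *
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *
 * while an instruction that has already built up state inside an
 * IEM_MC_BEGIN/IEM_MC_END block ends it with one of the IEM_MC_CALL_CIMPL_*
 * macros instead (which likewise return). iemCImpl_hlt is used here purely as
 * a familiar example name.
 */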
11908/**
11909 * Calls a FPU assembly implementation taking one visible argument.
11910 *
11911 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11912 * @param a0 The first extra argument.
11913 */
11914#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11915 do { \
11916 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
11917 } while (0)
11918
11919/**
11920 * Calls a FPU assembly implementation taking two visible arguments.
11921 *
11922 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11923 * @param a0 The first extra argument.
11924 * @param a1 The second extra argument.
11925 */
11926#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11927 do { \
11928 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
11929 } while (0)
11930
11931/**
11932 * Calls a FPU assembly implementation taking three visible arguments.
11933 *
11934 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11935 * @param a0 The first extra argument.
11936 * @param a1 The second extra argument.
11937 * @param a2 The third extra argument.
11938 */
11939#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11940 do { \
11941 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11942 } while (0)
11943
11944#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11945 do { \
11946 (a_FpuData).FSW = (a_FSW); \
11947 (a_FpuData).r80Result = *(a_pr80Value); \
11948 } while (0)
11949
11950/** Pushes FPU result onto the stack. */
11951#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11952 iemFpuPushResult(pVCpu, &a_FpuData)
11953/** Pushes FPU result onto the stack and sets the FPUDP. */
11954#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11955 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11956
11957/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
11958#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11959 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11960
11961/** Stores FPU result in a stack register. */
11962#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11963 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11964/** Stores FPU result in a stack register and pops the stack. */
11965#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11966 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11967/** Stores FPU result in a stack register and sets the FPUDP. */
11968#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11969 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11970/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11971 * stack. */
11972#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11973 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11974
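/* Illustrative sketch: a two-operand FPU arithmetic instruction typically
 * combines the macros above with the condition and underflow helpers defined
 * further down (names are placeholders for IEM_MC_LOCAL/IEM_MC_ARG declarations):
 *
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 */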
11975/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11976#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11977 iemFpuUpdateOpcodeAndIp(pVCpu)
11978/** Free a stack register (for FFREE and FFREEP). */
11979#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11980 iemFpuStackFree(pVCpu, a_iStReg)
11981/** Increment the FPU stack pointer. */
11982#define IEM_MC_FPU_STACK_INC_TOP() \
11983 iemFpuStackIncTop(pVCpu)
11984/** Decrement the FPU stack pointer. */
11985#define IEM_MC_FPU_STACK_DEC_TOP() \
11986 iemFpuStackDecTop(pVCpu)
11987
11988/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11989#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11990 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11991/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11992#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11993 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11994/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11995#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11996 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11997/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11998#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11999 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12000/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12001 * stack. */
12002#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12003 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12004/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12005#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12006 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12007
12008/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12009#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12010 iemFpuStackUnderflow(pVCpu, a_iStDst)
12011/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12012 * stack. */
12013#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12014 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12015/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12016 * FPUDS. */
12017#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12018 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12019/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12020 * FPUDS. Pops stack. */
12021#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12022 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12023/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12024 * stack twice. */
12025#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12026 iemFpuStackUnderflowThenPopPop(pVCpu)
12027/** Raises a FPU stack underflow exception for an instruction pushing a result
12028 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12029#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12030 iemFpuStackPushUnderflow(pVCpu)
12031/** Raises a FPU stack underflow exception for an instruction pushing a result
12032 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12033#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12034 iemFpuStackPushUnderflowTwo(pVCpu)
12035
12036/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12037 * FPUIP, FPUCS and FOP. */
12038#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12039 iemFpuStackPushOverflow(pVCpu)
12040/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12041 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12042#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12043 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12044/** Prepares for using the FPU state.
12045 * Ensures that we can use the host FPU in the current context (RC+R0).
12046 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12047#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12048/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12049#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12050/** Actualizes the guest FPU state so it can be accessed and modified. */
12051#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12052
12053/** Prepares for using the SSE state.
12054 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12055 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12056#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12057/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12058#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12059/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12060#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12061
12062/** Prepares for using the AVX state.
12063 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12064 * Ensures the guest AVX state in the CPUMCTX is up to date.
12065 * @note This will include the AVX512 state too when support for it is added
12066 * due to the zero extending feature of VEX instructions. */
12067#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12068/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12069#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12070/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12071#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12072
12073/**
12074 * Calls a MMX assembly implementation taking two visible arguments.
12075 *
12076 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12077 * @param a0 The first extra argument.
12078 * @param a1 The second extra argument.
12079 */
12080#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12081 do { \
12082 IEM_MC_PREPARE_FPU_USAGE(); \
12083 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12084 } while (0)
12085
12086/**
12087 * Calls a MMX assembly implementation taking three visible arguments.
12088 *
12089 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12090 * @param a0 The first extra argument.
12091 * @param a1 The second extra argument.
12092 * @param a2 The third extra argument.
12093 */
12094#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12095 do { \
12096 IEM_MC_PREPARE_FPU_USAGE(); \
12097 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12098 } while (0)
12099
12100
12101/**
12102 * Calls a SSE assembly implementation taking two visible arguments.
12103 *
12104 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12105 * @param a0 The first extra argument.
12106 * @param a1 The second extra argument.
12107 */
12108#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12109 do { \
12110 IEM_MC_PREPARE_SSE_USAGE(); \
12111 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12112 } while (0)
12113
12114/**
12115 * Calls a SSE assembly implementation taking three visible arguments.
12116 *
12117 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12118 * @param a0 The first extra argument.
12119 * @param a1 The second extra argument.
12120 * @param a2 The third extra argument.
12121 */
12122#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12123 do { \
12124 IEM_MC_PREPARE_SSE_USAGE(); \
12125 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12126 } while (0)
12127
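/* Illustrative sketch: an SSE instruction body prepares the state before
 * touching any XMM register and then invokes its worker, roughly
 *
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(puDst, iXRegDst);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnWorker, puDst, puSrc);
 *
 * puDst/puSrc/iXRegDst/pfnWorker are placeholder names; IEM_MC_REF_XREG_U128
 * is defined earlier in this file.
 */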
12128
12129/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12130 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12131#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12132 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12133
12134/**
12135 * Calls a AVX assembly implementation taking two visible arguments.
12136 *
12137 * There is one implicit zero'th argument, a pointer to the extended state.
12138 *
12139 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12140 * @param a1 The first extra argument.
12141 * @param a2 The second extra argument.
12142 */
12143#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12144 do { \
12145 IEM_MC_PREPARE_AVX_USAGE(); \
12146 a_pfnAImpl(pXState, (a1), (a2)); \
12147 } while (0)
12148
12149/**
12150 * Calls a AVX assembly implementation taking three visible arguments.
12151 *
12152 * There is one implicit zero'th argument, a pointer to the extended state.
12153 *
12154 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12155 * @param a1 The first extra argument.
12156 * @param a2 The second extra argument.
12157 * @param a3 The third extra argument.
12158 */
12159#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12160 do { \
12161 IEM_MC_PREPARE_AVX_USAGE(); \
12162 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12163 } while (0)
12164
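/* Illustrative sketch: the AVX call macros rely on the implicit state pointer
 * declared by IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() above, so the explicit
 * arguments start at index 1, e.g.
 *
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(PRTUINT256U,  puDst, 1);
 *      IEM_MC_ARG(PCRTUINT256U, puSrc, 2);
 *      ...
 *      IEM_MC_CALL_AVX_AIMPL_2(pfnWorker, puDst, puSrc);
 *
 * (puDst/puSrc/pfnWorker are placeholder names.)
 */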
12165/** @note Not for IOPL or IF testing. */
12166#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12167/** @note Not for IOPL or IF testing. */
12168#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12169/** @note Not for IOPL or IF testing. */
12170#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12171/** @note Not for IOPL or IF testing. */
12172#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12173/** @note Not for IOPL or IF testing. */
12174#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12175 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12176 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12177/** @note Not for IOPL or IF testing. */
12178#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12179 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12180 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12181/** @note Not for IOPL or IF testing. */
12182#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12183 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12184 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12185 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12186/** @note Not for IOPL or IF testing. */
12187#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12188 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12189 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12190 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12191#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12192#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12193#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12194/** @note Not for IOPL or IF testing. */
12195#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12196 if ( pVCpu->cpum.GstCtx.cx != 0 \
12197 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12198/** @note Not for IOPL or IF testing. */
12199#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12200 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12201 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12202/** @note Not for IOPL or IF testing. */
12203#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12204 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12205 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12206/** @note Not for IOPL or IF testing. */
12207#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12208 if ( pVCpu->cpum.GstCtx.cx != 0 \
12209 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12210/** @note Not for IOPL or IF testing. */
12211#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12212 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12213 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12214/** @note Not for IOPL or IF testing. */
12215#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12216 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12217 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12218#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12219#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12220
12221#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12222 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12223#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12224 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12225#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12226 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12227#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12228 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12229#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12230 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12231#define IEM_MC_IF_FCW_IM() \
12232 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12233
12234#define IEM_MC_ELSE() } else {
12235#define IEM_MC_ENDIF() } do {} while (0)
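/* Illustrative sketch: the IEM_MC_IF_* / IEM_MC_ELSE / IEM_MC_ENDIF macros
 * expand to plain brace pairs, so a conditional jump body reads like
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 *
 * IEM_MC_REL_JMP_S8 and IEM_MC_ADVANCE_RIP are defined earlier in this file;
 * i8Imm stands for the decoded immediate.
 */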
12236
12237/** @} */
12238
12239
12240/** @name Opcode Debug Helpers.
12241 * @{
12242 */
12243#ifdef VBOX_WITH_STATISTICS
12244# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12245#else
12246# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12247#endif
12248
12249#ifdef DEBUG
12250# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12251 do { \
12252 IEMOP_INC_STATS(a_Stats); \
12253 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12254 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12255 } while (0)
12256
12257# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12258 do { \
12259 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12260 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12261 (void)RT_CONCAT(OP_,a_Upper); \
12262 (void)(a_fDisHints); \
12263 (void)(a_fIemHints); \
12264 } while (0)
12265
12266# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12267 do { \
12268 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12269 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12270 (void)RT_CONCAT(OP_,a_Upper); \
12271 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12272 (void)(a_fDisHints); \
12273 (void)(a_fIemHints); \
12274 } while (0)
12275
12276# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12277 do { \
12278 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12279 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12280 (void)RT_CONCAT(OP_,a_Upper); \
12281 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12282 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12283 (void)(a_fDisHints); \
12284 (void)(a_fIemHints); \
12285 } while (0)
12286
12287# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12288 do { \
12289 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12290 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12291 (void)RT_CONCAT(OP_,a_Upper); \
12292 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12293 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12294 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12295 (void)(a_fDisHints); \
12296 (void)(a_fIemHints); \
12297 } while (0)
12298
12299# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12300 do { \
12301 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12302 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12303 (void)RT_CONCAT(OP_,a_Upper); \
12304 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12305 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12306 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12307 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12308 (void)(a_fDisHints); \
12309 (void)(a_fIemHints); \
12310 } while (0)
12311
12312#else
12313# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12314
12315# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12316 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12317# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12318 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12319# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12320 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12321# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12322 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12323# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12324 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12325
12326#endif
12327
12328#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12329 IEMOP_MNEMONIC0EX(a_Lower, \
12330 #a_Lower, \
12331 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12332#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12333 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12334 #a_Lower " " #a_Op1, \
12335 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12336#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12337 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12338 #a_Lower " " #a_Op1 "," #a_Op2, \
12339 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12340#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12341 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12342 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12343 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12344#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12345 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12346 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12347 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
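/* Illustrative sketch: a decoder function announces the instruction with one
 * of these wrappers before doing anything else, e.g.
 *
 *      IEMOP_MNEMONIC2(RM, MOVAPS, movaps, Vps, Wps, DISOPTYPE_HARMLESS, 0);
 *
 * which bumps the per-instruction statistics counter and, in debug builds,
 * emits the Log4 decode line (the trailing 0 stands in for IEM hint flags).
 * The form and operand tokens must resolve to existing IEMOPFORM_*, OP_* and
 * OP_PARM_* definitions or the compile-time (void) references above will fail.
 */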
12348
12349/** @} */
12350
12351
12352/** @name Opcode Helpers.
12353 * @{
12354 */
12355
12356#ifdef IN_RING3
12357# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12358 do { \
12359 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12360 else \
12361 { \
12362 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12363 return IEMOP_RAISE_INVALID_OPCODE(); \
12364 } \
12365 } while (0)
12366#else
12367# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12368 do { \
12369 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12370 else return IEMOP_RAISE_INVALID_OPCODE(); \
12371 } while (0)
12372#endif
12373
12374/** The instruction requires a 186 or later. */
12375#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12376# define IEMOP_HLP_MIN_186() do { } while (0)
12377#else
12378# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12379#endif
12380
12381/** The instruction requires a 286 or later. */
12382#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12383# define IEMOP_HLP_MIN_286() do { } while (0)
12384#else
12385# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12386#endif
12387
12388/** The instruction requires a 386 or later. */
12389#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12390# define IEMOP_HLP_MIN_386() do { } while (0)
12391#else
12392# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12393#endif
12394
12395/** The instruction requires a 386 or later if the given expression is true. */
12396#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12397# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12398#else
12399# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12400#endif
12401
12402/** The instruction requires a 486 or later. */
12403#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12404# define IEMOP_HLP_MIN_486() do { } while (0)
12405#else
12406# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12407#endif
12408
12409/** The instruction requires a Pentium (586) or later. */
12410#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12411# define IEMOP_HLP_MIN_586() do { } while (0)
12412#else
12413# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12414#endif
12415
12416/** The instruction requires a PentiumPro (686) or later. */
12417#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12418# define IEMOP_HLP_MIN_686() do { } while (0)
12419#else
12420# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12421#endif
12422
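/* Illustrative sketch: decoder bodies place these guards right after the
 * mnemonic macro, e.g.
 *
 *      IEMOP_MNEMONIC(bsf_Gv_Ev, "bsf Gv,Ev");
 *      IEMOP_HLP_MIN_386();
 *
 * so that instructions introduced with a later CPU raise \#UD when the target
 * CPU is configured as an earlier model. (bsf is merely a plausible example.)
 */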
12423
12424/** The instruction raises an \#UD in real and V8086 mode. */
12425#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12426 do \
12427 { \
12428 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12429 else return IEMOP_RAISE_INVALID_OPCODE(); \
12430 } while (0)
12431
12432/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12433 * 64-bit mode. */
12434#define IEMOP_HLP_NO_64BIT() \
12435 do \
12436 { \
12437 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12438 return IEMOP_RAISE_INVALID_OPCODE(); \
12439 } while (0)
12440
12441/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12442 * 64-bit mode. */
12443#define IEMOP_HLP_ONLY_64BIT() \
12444 do \
12445 { \
12446 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12447 return IEMOP_RAISE_INVALID_OPCODE(); \
12448 } while (0)
12449
12450/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12451#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12452 do \
12453 { \
12454 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12455 iemRecalEffOpSize64Default(pVCpu); \
12456 } while (0)
12457
12458/** The instruction has 64-bit operand size if 64-bit mode. */
12459#define IEMOP_HLP_64BIT_OP_SIZE() \
12460 do \
12461 { \
12462 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12463 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12464 } while (0)
12465
12466/** Only a REX prefix immediately preceding the first opcode byte takes
12467 * effect. This macro helps ensure this and logs bad guest code. */
12468#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12469 do \
12470 { \
12471 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12472 { \
12473 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12474 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12475 pVCpu->iem.s.uRexB = 0; \
12476 pVCpu->iem.s.uRexIndex = 0; \
12477 pVCpu->iem.s.uRexReg = 0; \
12478 iemRecalEffOpSize(pVCpu); \
12479 } \
12480 } while (0)
12481
12482/**
12483 * Done decoding.
12484 */
12485#define IEMOP_HLP_DONE_DECODING() \
12486 do \
12487 { \
12488 /*nothing for now, maybe later... */ \
12489 } while (0)
12490
12491/**
12492 * Done decoding, raise \#UD exception if lock prefix present.
12493 */
12494#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12495 do \
12496 { \
12497 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12498 { /* likely */ } \
12499 else \
12500 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12501 } while (0)
12502
12503
12504/**
12505 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12506 * repnz or size prefixes are present, or if in real or v8086 mode.
12507 */
12508#define IEMOP_HLP_DONE_VEX_DECODING() \
12509 do \
12510 { \
12511 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12512 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12513 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12514 { /* likely */ } \
12515 else \
12516 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12517 } while (0)
12518
12519/**
12520 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12521 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12522 */
12523#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12524 do \
12525 { \
12526 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12527 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12528 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12529 && pVCpu->iem.s.uVexLength == 0)) \
12530 { /* likely */ } \
12531 else \
12532 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12533 } while (0)
12534
12535
12536/**
12537 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12538 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12539 * register 0, or if in real or v8086 mode.
12540 */
12541#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12542 do \
12543 { \
12544 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12545 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12546 && !pVCpu->iem.s.uVex3rdReg \
12547 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12548 { /* likely */ } \
12549 else \
12550 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12551 } while (0)
12552
12553/**
12554 * Done decoding VEX, no V, L=0.
12555 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12556 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12557 */
12558#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12559 do \
12560 { \
12561 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12562 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12563 && pVCpu->iem.s.uVexLength == 0 \
12564 && pVCpu->iem.s.uVex3rdReg == 0 \
12565 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12566 { /* likely */ } \
12567 else \
12568 return IEMOP_RAISE_INVALID_OPCODE(); \
12569 } while (0)
12570
12571#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12572 do \
12573 { \
12574 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12575 { /* likely */ } \
12576 else \
12577 { \
12578 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12579 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12580 } \
12581 } while (0)
12582#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12583 do \
12584 { \
12585 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12586 { /* likely */ } \
12587 else \
12588 { \
12589 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12590 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12591 } \
12592 } while (0)
12593
12594/**
12595 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12596 * are present.
12597 */
12598#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12599 do \
12600 { \
12601 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12602 { /* likely */ } \
12603 else \
12604 return IEMOP_RAISE_INVALID_OPCODE(); \
12605 } while (0)
12606
12607
12608/**
12609 * Calculates the effective address of a ModR/M memory operand.
12610 *
12611 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12612 *
12613 * @return Strict VBox status code.
12614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12615 * @param bRm The ModRM byte.
12616 * @param cbImm The size of any immediate following the
12617 * effective address opcode bytes. Important for
12618 * RIP relative addressing.
12619 * @param pGCPtrEff Where to return the effective address.
12620 */
12621IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12622{
12623 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12624# define SET_SS_DEF() \
12625 do \
12626 { \
12627 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12628 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12629 } while (0)
12630
12631 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12632 {
12633/** @todo Check the effective address size crap! */
12634 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12635 {
12636 uint16_t u16EffAddr;
12637
12638 /* Handle the disp16 form with no registers first. */
12639 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12640 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12641 else
12642 {
12643 /* Get the displacement. */
12644 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12645 {
12646 case 0: u16EffAddr = 0; break;
12647 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12648 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12649 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12650 }
12651
12652 /* Add the base and index registers to the disp. */
12653 switch (bRm & X86_MODRM_RM_MASK)
12654 {
12655 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12656 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12657 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12658 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12659 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12660 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12661 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12662 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12663 }
12664 }
12665
12666 *pGCPtrEff = u16EffAddr;
12667 }
12668 else
12669 {
12670 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12671 uint32_t u32EffAddr;
12672
12673 /* Handle the disp32 form with no registers first. */
12674 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12675 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12676 else
12677 {
12678 /* Get the register (or SIB) value. */
12679 switch ((bRm & X86_MODRM_RM_MASK))
12680 {
12681 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12682 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12683 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12684 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12685 case 4: /* SIB */
12686 {
12687 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12688
12689 /* Get the index and scale it. */
12690 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12691 {
12692 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12693 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12694 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12695 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12696 case 4: u32EffAddr = 0; /*none */ break;
12697 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12698 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12699 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12700 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12701 }
12702 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12703
12704 /* add base */
12705 switch (bSib & X86_SIB_BASE_MASK)
12706 {
12707 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12708 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12709 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12710 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12711 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12712 case 5:
12713 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12714 {
12715 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12716 SET_SS_DEF();
12717 }
12718 else
12719 {
12720 uint32_t u32Disp;
12721 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12722 u32EffAddr += u32Disp;
12723 }
12724 break;
12725 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12726 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12727 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12728 }
12729 break;
12730 }
12731 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12732 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12733 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12734 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12735 }
12736
12737 /* Get and add the displacement. */
12738 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12739 {
12740 case 0:
12741 break;
12742 case 1:
12743 {
12744 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12745 u32EffAddr += i8Disp;
12746 break;
12747 }
12748 case 2:
12749 {
12750 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12751 u32EffAddr += u32Disp;
12752 break;
12753 }
12754 default:
12755 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12756 }
12757
12758 }
12759 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12760 *pGCPtrEff = u32EffAddr;
12761 else
12762 {
12763 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12764 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12765 }
12766 }
12767 }
12768 else
12769 {
12770 uint64_t u64EffAddr;
12771
12772 /* Handle the rip+disp32 form with no registers first. */
12773 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12774 {
12775 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12776 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12777 }
12778 else
12779 {
12780 /* Get the register (or SIB) value. */
12781 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12782 {
12783 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12784 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12785 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12786 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12787 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
12788 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12789 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12790 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12791 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12792 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12793 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12794 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12795 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12796 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12797 /* SIB */
12798 case 4:
12799 case 12:
12800 {
12801 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12802
12803 /* Get the index and scale it. */
12804 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12805 {
12806 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12807 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12808 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12809 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12810 case 4: u64EffAddr = 0; /*none */ break;
12811 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
12812 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12813 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12814 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12815 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12816 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12817 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12818 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
12819 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12820 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12821 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12822 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12823 }
12824 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12825
12826 /* add base */
12827 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12828 {
12829 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
12830 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
12831 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
12832 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
12833 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
12834 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
12835 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
12836 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
12837 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
12838 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
12839 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
12840 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
12841 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
12842 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
12843 /* complicated encodings */
12844 case 5:
12845 case 13:
12846 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12847 {
12848 if (!pVCpu->iem.s.uRexB)
12849 {
12850 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
12851 SET_SS_DEF();
12852 }
12853 else
12854 u64EffAddr += pVCpu->cpum.GstCtx.r13;
12855 }
12856 else
12857 {
12858 uint32_t u32Disp;
12859 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12860 u64EffAddr += (int32_t)u32Disp;
12861 }
12862 break;
12863 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12864 }
12865 break;
12866 }
12867 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12868 }
12869
12870 /* Get and add the displacement. */
12871 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12872 {
12873 case 0:
12874 break;
12875 case 1:
12876 {
12877 int8_t i8Disp;
12878 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12879 u64EffAddr += i8Disp;
12880 break;
12881 }
12882 case 2:
12883 {
12884 uint32_t u32Disp;
12885 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12886 u64EffAddr += (int32_t)u32Disp;
12887 break;
12888 }
12889 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12890 }
12891
12892 }
12893
12894 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12895 *pGCPtrEff = u64EffAddr;
12896 else
12897 {
12898 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12899 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12900 }
12901 }
12902
12903 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12904 return VINF_SUCCESS;
12905}
12906
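/* Worked example: with 32-bit addressing, bRm=0x84 gives mod=2, rm=4, so a SIB
 * byte follows; SIB=0x88 selects scale*4, index=ECX, base=EAX, and the mod=2
 * disp32 is then added, yielding EAX + ECX*4 + disp32. With 16-bit addressing,
 * bRm=0x46 plus a disp8 yields BP + disp8, and SET_SS_DEF() makes SS the
 * default segment unless a segment prefix was given. */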
12907
12908/**
12909 * Calculates the effective address of a ModR/M memory operand.
12910 *
12911 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12912 *
12913 * @return Strict VBox status code.
12914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12915 * @param bRm The ModRM byte.
12916 * @param cbImm The size of any immediate following the
12917 * effective address opcode bytes. Important for
12918 * RIP relative addressing.
12919 * @param pGCPtrEff Where to return the effective address.
12920 * @param offRsp RSP displacement.
12921 */
12922IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12923{
12924 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12925# define SET_SS_DEF() \
12926 do \
12927 { \
12928 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12929 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12930 } while (0)
12931
12932 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12933 {
12934/** @todo Check the effective address size crap! */
12935 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12936 {
12937 uint16_t u16EffAddr;
12938
12939 /* Handle the disp16 form with no registers first. */
12940 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12941 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12942 else
12943 {
12944 /* Get the displacement. */
12945 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12946 {
12947 case 0: u16EffAddr = 0; break;
12948 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12949 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12950 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12951 }
12952
12953 /* Add the base and index registers to the disp. */
12954 switch (bRm & X86_MODRM_RM_MASK)
12955 {
12956 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12957 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12958 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12959 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12960 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12961 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12962 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12963 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12964 }
12965 }
12966
12967 *pGCPtrEff = u16EffAddr;
12968 }
12969 else
12970 {
12971 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12972 uint32_t u32EffAddr;
12973
12974 /* Handle the disp32 form with no registers first. */
12975 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12976 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12977 else
12978 {
12979 /* Get the register (or SIB) value. */
12980 switch ((bRm & X86_MODRM_RM_MASK))
12981 {
12982 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12983 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12984 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12985 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12986 case 4: /* SIB */
12987 {
12988 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12989
12990 /* Get the index and scale it. */
12991 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12992 {
12993 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12994 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12995 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12996 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12997 case 4: u32EffAddr = 0; /*none */ break;
12998 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12999 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13000 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13001 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13002 }
13003 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13004
13005 /* add base */
13006 switch (bSib & X86_SIB_BASE_MASK)
13007 {
13008 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13009 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13010 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13011 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13012 case 4:
13013 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13014 SET_SS_DEF();
13015 break;
13016 case 5:
13017 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13018 {
13019 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13020 SET_SS_DEF();
13021 }
13022 else
13023 {
13024 uint32_t u32Disp;
13025 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13026 u32EffAddr += u32Disp;
13027 }
13028 break;
13029 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13030 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13031 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13032 }
13033 break;
13034 }
13035 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13036 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13037 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13038 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13039 }
13040
13041 /* Get and add the displacement. */
13042 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13043 {
13044 case 0:
13045 break;
13046 case 1:
13047 {
13048 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13049 u32EffAddr += i8Disp;
13050 break;
13051 }
13052 case 2:
13053 {
13054 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13055 u32EffAddr += u32Disp;
13056 break;
13057 }
13058 default:
13059 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13060 }
13061
13062 }
13063 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13064 *pGCPtrEff = u32EffAddr;
13065 else
13066 {
13067 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13068 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13069 }
13070 }
13071 }
13072 else
13073 {
13074 uint64_t u64EffAddr;
13075
13076 /* Handle the rip+disp32 form with no registers first. */
13077 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13078 {
13079 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13080 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13081 }
13082 else
13083 {
13084 /* Get the register (or SIB) value. */
13085 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13086 {
13087 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13088 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13089 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13090 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13091 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13092 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13093 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13094 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13095 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13096 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13097 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13098 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13099 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13100 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13101 /* SIB */
13102 case 4:
13103 case 12:
13104 {
13105 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13106
13107 /* Get the index and scale it. */
13108 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13109 {
13110 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13111 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13112 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13113 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13114 case 4: u64EffAddr = 0; /*none */ break;
13115 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13116 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13117 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13118 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13119 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13120 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13121 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13122 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13123 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13124 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13125 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13126 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13127 }
13128 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13129
13130 /* add base */
13131 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13132 {
13133 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13134 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13135 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13136 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13137 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13138 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13139 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13140 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13141 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13142 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13143 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13144 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13145 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13146 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13147 /* complicated encodings */
13148 case 5:
13149 case 13:
13150 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13151 {
13152 if (!pVCpu->iem.s.uRexB)
13153 {
13154 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13155 SET_SS_DEF();
13156 }
13157 else
13158 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13159 }
13160 else
13161 {
13162 uint32_t u32Disp;
13163 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13164 u64EffAddr += (int32_t)u32Disp;
13165 }
13166 break;
13167 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13168 }
13169 break;
13170 }
13171 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13172 }
13173
13174 /* Get and add the displacement. */
13175 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13176 {
13177 case 0:
13178 break;
13179 case 1:
13180 {
13181 int8_t i8Disp;
13182 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13183 u64EffAddr += i8Disp;
13184 break;
13185 }
13186 case 2:
13187 {
13188 uint32_t u32Disp;
13189 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13190 u64EffAddr += (int32_t)u32Disp;
13191 break;
13192 }
13193 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13194 }
13195
13196 }
13197
13198 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13199 *pGCPtrEff = u64EffAddr;
13200 else
13201 {
13202 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13203 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13204 }
13205 }
13206
13207 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13208 return VINF_SUCCESS;
13209}
13210
13211
13212#ifdef IEM_WITH_SETJMP
13213/**
13214 * Calculates the effective address of a ModR/M memory operand.
13215 *
13216 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13217 *
13218 * May longjmp on internal error.
13219 *
13220 * @return The effective address.
13221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13222 * @param bRm The ModRM byte.
13223 * @param cbImm The size of any immediate following the
13224 * effective address opcode bytes. Important for
13225 * RIP relative addressing.
13226 */
13227IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13228{
13229 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13230# define SET_SS_DEF() \
13231 do \
13232 { \
13233 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13234 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13235 } while (0)
13236
13237 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13238 {
13239/** @todo Check the effective address size crap! */
13240 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13241 {
13242 uint16_t u16EffAddr;
13243
13244 /* Handle the disp16 form with no registers first. */
13245 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13246 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13247 else
13248 {
13249                /* Get the displacement. */
13250 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13251 {
13252 case 0: u16EffAddr = 0; break;
13253 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13254 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13255 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13256 }
13257
13258 /* Add the base and index registers to the disp. */
13259 switch (bRm & X86_MODRM_RM_MASK)
13260 {
13261 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13262 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13263 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13264 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13265 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13266 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13267 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13268 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13269 }
13270 }
13271
13272 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13273 return u16EffAddr;
13274 }
13275
13276 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13277 uint32_t u32EffAddr;
13278
13279 /* Handle the disp32 form with no registers first. */
13280 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13281 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13282 else
13283 {
13284 /* Get the register (or SIB) value. */
13285 switch ((bRm & X86_MODRM_RM_MASK))
13286 {
13287 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13288 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13289 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13290 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13291 case 4: /* SIB */
13292 {
13293 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13294
13295 /* Get the index and scale it. */
13296 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13297 {
13298 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13299 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13300 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13301 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13302 case 4: u32EffAddr = 0; /*none */ break;
13303 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13304 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13305 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13306 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13307 }
13308 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13309
13310 /* add base */
13311 switch (bSib & X86_SIB_BASE_MASK)
13312 {
13313 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13314 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13315 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13316 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13317 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13318 case 5:
13319 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13320 {
13321 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13322 SET_SS_DEF();
13323 }
13324 else
13325 {
13326 uint32_t u32Disp;
13327 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13328 u32EffAddr += u32Disp;
13329 }
13330 break;
13331 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13332 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13333 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13334 }
13335 break;
13336 }
13337 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13338 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13339 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13340 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13341 }
13342
13343 /* Get and add the displacement. */
13344 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13345 {
13346 case 0:
13347 break;
13348 case 1:
13349 {
13350 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13351 u32EffAddr += i8Disp;
13352 break;
13353 }
13354 case 2:
13355 {
13356 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13357 u32EffAddr += u32Disp;
13358 break;
13359 }
13360 default:
13361 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13362 }
13363 }
13364
13365 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13366 {
13367 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13368 return u32EffAddr;
13369 }
13370 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13371 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13372 return u32EffAddr & UINT16_MAX;
13373 }
13374
13375 uint64_t u64EffAddr;
13376
13377 /* Handle the rip+disp32 form with no registers first. */
13378 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13379 {
13380 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13381 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13382 }
13383 else
13384 {
13385 /* Get the register (or SIB) value. */
13386 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13387 {
13388 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13389 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13390 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13391 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13392 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13393 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13394 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13395 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13396 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13397 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13398 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13399 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13400 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13401 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13402 /* SIB */
13403 case 4:
13404 case 12:
13405 {
13406 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13407
13408 /* Get the index and scale it. */
13409 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13410 {
13411 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13412 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13413 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13414 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13415 case 4: u64EffAddr = 0; /*none */ break;
13416 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13417 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13418 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13419 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13420 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13421 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13422 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13423 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13424 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13425 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13426 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13427 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13428 }
13429 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13430
13431 /* add base */
13432 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13433 {
13434 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13435 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13436 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13437 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13438 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13439 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13440 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13441 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13442 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13443 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13444 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13445 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13446 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13447 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13448 /* complicated encodings */
13449 case 5:
13450 case 13:
13451 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13452 {
13453 if (!pVCpu->iem.s.uRexB)
13454 {
13455 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13456 SET_SS_DEF();
13457 }
13458 else
13459 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13460 }
13461 else
13462 {
13463 uint32_t u32Disp;
13464 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13465 u64EffAddr += (int32_t)u32Disp;
13466 }
13467 break;
13468 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13469 }
13470 break;
13471 }
13472 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13473 }
13474
13475 /* Get and add the displacement. */
13476 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13477 {
13478 case 0:
13479 break;
13480 case 1:
13481 {
13482 int8_t i8Disp;
13483 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13484 u64EffAddr += i8Disp;
13485 break;
13486 }
13487 case 2:
13488 {
13489 uint32_t u32Disp;
13490 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13491 u64EffAddr += (int32_t)u32Disp;
13492 break;
13493 }
13494 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13495 }
13496
13497 }
13498
13499 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13500 {
13501 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13502 return u64EffAddr;
13503 }
13504 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13505 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13506 return u64EffAddr & UINT32_MAX;
13507}
13508#endif /* IEM_WITH_SETJMP */
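/*
 * Illustrative sketch (not part of the build): the core ModRM/SIB arithmetic
 * performed by the effective-address helpers above, reduced to the common
 * 64-bit base + index*scale + disp8 form.  The helper name and the example
 * encoding below are assumptions for illustration only.
 */
#if 0
DECLINLINE(uint64_t) iemExampleCalcSibEffAddr(uint64_t uBase, uint64_t uIndex, uint8_t bSib, int8_t i8Disp)
{
    uint64_t uEffAddr = uIndex;
    uEffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK; /* scale by 1, 2, 4 or 8 */
    uEffAddr += uBase;                                                /* add the base register */
    uEffAddr += i8Disp;                                               /* mod=01: sign-extended disp8 */
    return uEffAddr;
}
/* Example: mov rax, [rbx + rcx*4 + 0x10] encodes ModRM=0x44 (mod=01, rm=100/SIB),
   SIB=0x8b (scale=10, index=rcx, base=rbx) and disp8=0x10, yielding rcx*4 + rbx + 0x10. */
#endif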
13509
13510/** @} */
13511
13512
13513
13514/*
13515 * Include the instructions
13516 */
13517#include "IEMAllInstructions.cpp.h"
13518
13519
13520
13521#ifdef LOG_ENABLED
13522/**
13523 * Logs the current instruction.
13524 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13525 * @param fSameCtx Set if we have the same context information as the VMM,
13526 * clear if we may have already executed an instruction in
13527 * our debug context. When clear, we assume IEMCPU holds
13528 * valid CPU mode info.
13529 *
13530 * The @a fSameCtx parameter is now misleading and obsolete.
13531 */
13532IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx)
13533{
13534# ifdef IN_RING3
13535 if (LogIs2Enabled())
13536 {
13537 char szInstr[256];
13538 uint32_t cbInstr = 0;
13539 if (fSameCtx)
13540 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13541 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13542 szInstr, sizeof(szInstr), &cbInstr);
13543 else
13544 {
13545 uint32_t fFlags = 0;
13546 switch (pVCpu->iem.s.enmCpuMode)
13547 {
13548 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13549 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13550 case IEMMODE_16BIT:
13551 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13552 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13553 else
13554 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13555 break;
13556 }
13557 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13558 szInstr, sizeof(szInstr), &cbInstr);
13559 }
13560
13561 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13562 Log2(("****\n"
13563 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13564 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13565 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13566 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13567 " %s\n"
13568 ,
13569 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13570 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13571 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13572 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13573 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13574 szInstr));
13575
13576 if (LogIs3Enabled())
13577 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13578 }
13579 else
13580# endif
13581 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13582 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13583 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13584}
13585#endif /* LOG_ENABLED */
13586
13587
13588/**
13589 * Makes status code adjustments (pass up from I/O and access handlers)
13590 * as well as maintaining statistics.
13591 *
13592 * @returns Strict VBox status code to pass up.
13593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13594 * @param rcStrict The status from executing an instruction.
13595 */
13596DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13597{
13598 if (rcStrict != VINF_SUCCESS)
13599 {
13600 if (RT_SUCCESS(rcStrict))
13601 {
13602 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13603 || rcStrict == VINF_IOM_R3_IOPORT_READ
13604 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13605 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13606 || rcStrict == VINF_IOM_R3_MMIO_READ
13607 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13608 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13609 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13610 || rcStrict == VINF_CPUM_R3_MSR_READ
13611 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13612 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13613 || rcStrict == VINF_EM_RAW_TO_R3
13614 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13615 || rcStrict == VINF_EM_TRIPLE_FAULT
13616 /* raw-mode / virt handlers only: */
13617 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13618 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13619 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13620 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13621 || rcStrict == VINF_SELM_SYNC_GDT
13622 || rcStrict == VINF_CSAM_PENDING_ACTION
13623 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13624 /* nested hw.virt codes: */
13625 || rcStrict == VINF_SVM_VMEXIT
13626 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13627/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13628 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13629#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13630 if ( rcStrict == VINF_SVM_VMEXIT
13631 && rcPassUp == VINF_SUCCESS)
13632 rcStrict = VINF_SUCCESS;
13633 else
13634#endif
13635 if (rcPassUp == VINF_SUCCESS)
13636 pVCpu->iem.s.cRetInfStatuses++;
13637 else if ( rcPassUp < VINF_EM_FIRST
13638 || rcPassUp > VINF_EM_LAST
13639 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13640 {
13641 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13642 pVCpu->iem.s.cRetPassUpStatus++;
13643 rcStrict = rcPassUp;
13644 }
13645 else
13646 {
13647 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13648 pVCpu->iem.s.cRetInfStatuses++;
13649 }
13650 }
13651 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13652 pVCpu->iem.s.cRetAspectNotImplemented++;
13653 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13654 pVCpu->iem.s.cRetInstrNotImplemented++;
13655 else
13656 pVCpu->iem.s.cRetErrStatuses++;
13657 }
13658 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13659 {
13660 pVCpu->iem.s.cRetPassUpStatus++;
13661 rcStrict = pVCpu->iem.s.rcPassUp;
13662 }
13663
13664 return rcStrict;
13665}
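/*
 * Illustrative sketch (not part of the build): the pass-up priority rule from
 * iemExecStatusCodeFiddling in isolation, for the case where both codes are
 * informational.  The helper name is an assumption; the comparison simply
 * restates the one above.
 */
#if 0
DECLINLINE(int32_t) iemExamplePickPassUpStatus(int32_t rcStrict, int32_t rcPassUp)
{
    /* The recorded pass-up status replaces the current one when it lies outside
       the VINF_EM_FIRST..VINF_EM_LAST range or ranks higher (lower value). */
    if (   rcPassUp < VINF_EM_FIRST
        || rcPassUp > VINF_EM_LAST
        || rcPassUp < rcStrict)
        return rcPassUp;
    return rcStrict;
}
#endif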
13666
13667
13668/**
13669 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13670 * IEMExecOneWithPrefetchedByPC.
13671 *
13672 * Similar code is found in IEMExecLots.
13673 *
13674 * @return Strict VBox status code.
13675 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13677 * @param fExecuteInhibit If set, execute the instruction following CLI,
13678 * POP SS and MOV SS,GR.
13679 */
13680DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13681{
13682 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13683 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13684 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13685
13686#ifdef IEM_WITH_SETJMP
13687 VBOXSTRICTRC rcStrict;
13688 jmp_buf JmpBuf;
13689 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13690 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13691 if ((rcStrict = setjmp(JmpBuf)) == 0)
13692 {
13693 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13694 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13695 }
13696 else
13697 pVCpu->iem.s.cLongJumps++;
13698 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13699#else
13700 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13701 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13702#endif
13703 if (rcStrict == VINF_SUCCESS)
13704 pVCpu->iem.s.cInstructions++;
13705 if (pVCpu->iem.s.cActiveMappings > 0)
13706 {
13707 Assert(rcStrict != VINF_SUCCESS);
13708 iemMemRollback(pVCpu);
13709 }
13710 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13711 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13712 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13713
13714//#ifdef DEBUG
13715// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13716//#endif
13717
13718 /* Execute the next instruction as well if a cli, pop ss or
13719 mov ss, Gr has just completed successfully. */
13720 if ( fExecuteInhibit
13721 && rcStrict == VINF_SUCCESS
13722 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13723 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13724 {
13725 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13726 if (rcStrict == VINF_SUCCESS)
13727 {
13728#ifdef LOG_ENABLED
13729 iemLogCurInstr(pVCpu, false);
13730#endif
13731#ifdef IEM_WITH_SETJMP
13732 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13733 if ((rcStrict = setjmp(JmpBuf)) == 0)
13734 {
13735 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13736 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13737 }
13738 else
13739 pVCpu->iem.s.cLongJumps++;
13740 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13741#else
13742 IEM_OPCODE_GET_NEXT_U8(&b);
13743 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13744#endif
13745 if (rcStrict == VINF_SUCCESS)
13746 pVCpu->iem.s.cInstructions++;
13747 if (pVCpu->iem.s.cActiveMappings > 0)
13748 {
13749 Assert(rcStrict != VINF_SUCCESS);
13750 iemMemRollback(pVCpu);
13751 }
13752 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13753 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13754 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13755 }
13756 else if (pVCpu->iem.s.cActiveMappings > 0)
13757 iemMemRollback(pVCpu);
13758 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13759 }
13760
13761 /*
13762 * Return value fiddling, statistics and sanity assertions.
13763 */
13764 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13765
13766 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
13767 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
13768 return rcStrict;
13769}
13770
13771
13772#ifdef IN_RC
13773/**
13774 * Re-enters raw-mode or ensures we return to ring-3.
13775 *
13776 * @returns rcStrict, maybe modified.
13777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13778 * @param rcStrict The status code returned by the interpreter.
13779 */
13780DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13781{
13782 if ( !pVCpu->iem.s.fInPatchCode
13783 && ( rcStrict == VINF_SUCCESS
13784 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13785 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13786 {
13787 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13788 CPUMRawEnter(pVCpu);
13789 else
13790 {
13791 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13792 rcStrict = VINF_EM_RESCHEDULE;
13793 }
13794 }
13795 return rcStrict;
13796}
13797#endif
13798
13799
13800/**
13801 * Execute one instruction.
13802 *
13803 * @return Strict VBox status code.
13804 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13805 */
13806VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13807{
13808#ifdef LOG_ENABLED
13809 iemLogCurInstr(pVCpu, true);
13810#endif
13811
13812 /*
13813 * Do the decoding and emulation.
13814 */
13815 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13816 if (rcStrict == VINF_SUCCESS)
13817 rcStrict = iemExecOneInner(pVCpu, true);
13818 else if (pVCpu->iem.s.cActiveMappings > 0)
13819 iemMemRollback(pVCpu);
13820
13821#ifdef IN_RC
13822 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13823#endif
13824 if (rcStrict != VINF_SUCCESS)
13825 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13826 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13827 return rcStrict;
13828}
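/*
 * Illustrative sketch (not part of the build): a minimal single-stepping loop
 * built on IEMExecOne, roughly as a caller like EM might use it.  The loop
 * bound is an arbitrary assumption.
 */
#if 0
    for (unsigned cSteps = 0; cSteps < 32; cSteps++)
    {
        VBOXSTRICTRC rcStrict2 = IEMExecOne(pVCpu);
        if (rcStrict2 != VINF_SUCCESS)
            break; /* informational and error statuses are handed back to the caller */
    }
#endif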
13829
13830
13831VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13832{
13833 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
13834
13835 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13836 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13837 if (rcStrict == VINF_SUCCESS)
13838 {
13839 rcStrict = iemExecOneInner(pVCpu, true);
13840 if (pcbWritten)
13841 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13842 }
13843 else if (pVCpu->iem.s.cActiveMappings > 0)
13844 iemMemRollback(pVCpu);
13845
13846#ifdef IN_RC
13847 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13848#endif
13849 return rcStrict;
13850}
13851
13852
13853VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13854 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13855{
13856 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
13857
13858 VBOXSTRICTRC rcStrict;
13859 if ( cbOpcodeBytes
13860 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
13861 {
13862 iemInitDecoder(pVCpu, false);
13863#ifdef IEM_WITH_CODE_TLB
13864 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13865 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13866 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13867 pVCpu->iem.s.offCurInstrStart = 0;
13868 pVCpu->iem.s.offInstrNextByte = 0;
13869#else
13870 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13871 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13872#endif
13873 rcStrict = VINF_SUCCESS;
13874 }
13875 else
13876 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13877 if (rcStrict == VINF_SUCCESS)
13878 rcStrict = iemExecOneInner(pVCpu, true);
13879 else if (pVCpu->iem.s.cActiveMappings > 0)
13880 iemMemRollback(pVCpu);
13881
13882#ifdef IN_RC
13883 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13884#endif
13885 return rcStrict;
13886}
13887
13888
13889VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13890{
13891 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
13892
13893 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13894 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13895 if (rcStrict == VINF_SUCCESS)
13896 {
13897 rcStrict = iemExecOneInner(pVCpu, false);
13898 if (pcbWritten)
13899 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13900 }
13901 else if (pVCpu->iem.s.cActiveMappings > 0)
13902 iemMemRollback(pVCpu);
13903
13904#ifdef IN_RC
13905 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13906#endif
13907 return rcStrict;
13908}
13909
13910
13911VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13912 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13913{
13914 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
13915
13916 VBOXSTRICTRC rcStrict;
13917 if ( cbOpcodeBytes
13918 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
13919 {
13920 iemInitDecoder(pVCpu, true);
13921#ifdef IEM_WITH_CODE_TLB
13922 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13923 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13924 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13925 pVCpu->iem.s.offCurInstrStart = 0;
13926 pVCpu->iem.s.offInstrNextByte = 0;
13927#else
13928 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13929 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13930#endif
13931 rcStrict = VINF_SUCCESS;
13932 }
13933 else
13934 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13935 if (rcStrict == VINF_SUCCESS)
13936 rcStrict = iemExecOneInner(pVCpu, false);
13937 else if (pVCpu->iem.s.cActiveMappings > 0)
13938 iemMemRollback(pVCpu);
13939
13940#ifdef IN_RC
13941 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13942#endif
13943 return rcStrict;
13944}
13945
13946
13947/**
13948 * For debugging DISGetParamSize, may come in handy.
13949 *
13950 * @returns Strict VBox status code.
13951 * @param pVCpu The cross context virtual CPU structure of the
13952 * calling EMT.
13953 * @param pCtxCore The context core structure.
13954 * @param OpcodeBytesPC The PC of the opcode bytes.
13955 * @param pvOpcodeBytes Prefetched opcode bytes.
13956 * @param cbOpcodeBytes Number of prefetched bytes.
13957 * @param pcbWritten Where to return the number of bytes written.
13958 * Optional.
13959 */
13960VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13961 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13962 uint32_t *pcbWritten)
13963{
13964 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
13965
13966 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13967 VBOXSTRICTRC rcStrict;
13968 if ( cbOpcodeBytes
13969 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
13970 {
13971 iemInitDecoder(pVCpu, true);
13972#ifdef IEM_WITH_CODE_TLB
13973 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13974 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13975 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13976 pVCpu->iem.s.offCurInstrStart = 0;
13977 pVCpu->iem.s.offInstrNextByte = 0;
13978#else
13979 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13980 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13981#endif
13982 rcStrict = VINF_SUCCESS;
13983 }
13984 else
13985 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13986 if (rcStrict == VINF_SUCCESS)
13987 {
13988 rcStrict = iemExecOneInner(pVCpu, false);
13989 if (pcbWritten)
13990 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13991 }
13992 else if (pVCpu->iem.s.cActiveMappings > 0)
13993 iemMemRollback(pVCpu);
13994
13995#ifdef IN_RC
13996 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13997#endif
13998 return rcStrict;
13999}
14000
14001
14002VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14003{
14004 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14005
14006 /*
14007 * See if there is an interrupt pending in TRPM, inject it if we can.
14008 */
14009 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14010#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14011 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14012 if (fIntrEnabled)
14013 {
14014 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14015 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14016 else
14017 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14018 }
14019#else
14020 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14021#endif
14022 if ( fIntrEnabled
14023 && TRPMHasTrap(pVCpu)
14024 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14025 {
14026 uint8_t u8TrapNo;
14027 TRPMEVENT enmType;
14028 RTGCUINT uErrCode;
14029 RTGCPTR uCr2;
14030 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14031 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14032 TRPMResetTrap(pVCpu);
14033 }
14034
14035 /*
14036 * Initial decoder init w/ prefetch, then setup setjmp.
14037 */
14038 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14039 if (rcStrict == VINF_SUCCESS)
14040 {
14041#ifdef IEM_WITH_SETJMP
14042 jmp_buf JmpBuf;
14043 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14044 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14045 pVCpu->iem.s.cActiveMappings = 0;
14046 if ((rcStrict = setjmp(JmpBuf)) == 0)
14047#endif
14048 {
14049 /*
14050 * The run loop. We limit ourselves to 4096 instructions right now.
14051 */
14052 PVM pVM = pVCpu->CTX_SUFF(pVM);
14053 uint32_t cInstr = 4096;
14054 for (;;)
14055 {
14056 /*
14057 * Log the state.
14058 */
14059#ifdef LOG_ENABLED
14060 iemLogCurInstr(pVCpu, true);
14061#endif
14062
14063 /*
14064 * Do the decoding and emulation.
14065 */
14066 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14067 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14068 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14069 {
14070 Assert(pVCpu->iem.s.cActiveMappings == 0);
14071 pVCpu->iem.s.cInstructions++;
14072 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14073 {
14074 uint32_t fCpu = pVCpu->fLocalForcedActions
14075 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14076 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14077 | VMCPU_FF_TLB_FLUSH
14078#ifdef VBOX_WITH_RAW_MODE
14079 | VMCPU_FF_TRPM_SYNC_IDT
14080 | VMCPU_FF_SELM_SYNC_TSS
14081 | VMCPU_FF_SELM_SYNC_GDT
14082 | VMCPU_FF_SELM_SYNC_LDT
14083#endif
14084 | VMCPU_FF_INHIBIT_INTERRUPTS
14085 | VMCPU_FF_BLOCK_NMIS
14086 | VMCPU_FF_UNHALT ));
14087
14088 if (RT_LIKELY( ( !fCpu
14089 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14090 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14091 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14092 {
14093 if (cInstr-- > 0)
14094 {
14095 Assert(pVCpu->iem.s.cActiveMappings == 0);
14096 iemReInitDecoder(pVCpu);
14097 continue;
14098 }
14099 }
14100 }
14101 Assert(pVCpu->iem.s.cActiveMappings == 0);
14102 }
14103 else if (pVCpu->iem.s.cActiveMappings > 0)
14104 iemMemRollback(pVCpu);
14105 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14106 break;
14107 }
14108 }
14109#ifdef IEM_WITH_SETJMP
14110 else
14111 {
14112 if (pVCpu->iem.s.cActiveMappings > 0)
14113 iemMemRollback(pVCpu);
14114 pVCpu->iem.s.cLongJumps++;
14115 }
14116 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14117#endif
14118
14119 /*
14120 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14121 */
14122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14124 }
14125 else
14126 {
14127 if (pVCpu->iem.s.cActiveMappings > 0)
14128 iemMemRollback(pVCpu);
14129
14130#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14131 /*
14132 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14133 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14134 */
14135 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14136#endif
14137 }
14138
14139 /*
14140 * Maybe re-enter raw-mode and log.
14141 */
14142#ifdef IN_RC
14143 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14144#endif
14145 if (rcStrict != VINF_SUCCESS)
14146 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14147 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14148 if (pcInstructions)
14149 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14150 return rcStrict;
14151}
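/*
 * Illustrative sketch (not part of the build): invoking IEMExecLots and using
 * the optional instruction counter.  The log text is an assumption.
 */
#if 0
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict2 = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("Executed %u instructions -> %Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict2)));
#endif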
14152
14153
14154
14155/**
14156 * Injects a trap, fault, abort, software interrupt or external interrupt.
14157 *
14158 * The parameter list matches TRPMQueryTrapAll pretty closely.
14159 *
14160 * @returns Strict VBox status code.
14161 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14162 * @param u8TrapNo The trap number.
14163 * @param enmType What type is it (trap/fault/abort), software
14164 * interrupt or hardware interrupt.
14165 * @param uErrCode The error code if applicable.
14166 * @param uCr2 The CR2 value if applicable.
14167 * @param cbInstr The instruction length (only relevant for
14168 * software interrupts).
14169 */
14170VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14171 uint8_t cbInstr)
14172{
14173 iemInitDecoder(pVCpu, false);
14174#ifdef DBGFTRACE_ENABLED
14175 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14176 u8TrapNo, enmType, uErrCode, uCr2);
14177#endif
14178
14179 uint32_t fFlags;
14180 switch (enmType)
14181 {
14182 case TRPM_HARDWARE_INT:
14183 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14184 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14185 uErrCode = uCr2 = 0;
14186 break;
14187
14188 case TRPM_SOFTWARE_INT:
14189 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14190 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14191 uErrCode = uCr2 = 0;
14192 break;
14193
14194 case TRPM_TRAP:
14195 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14196 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14197 if (u8TrapNo == X86_XCPT_PF)
14198 fFlags |= IEM_XCPT_FLAGS_CR2;
14199 switch (u8TrapNo)
14200 {
14201 case X86_XCPT_DF:
14202 case X86_XCPT_TS:
14203 case X86_XCPT_NP:
14204 case X86_XCPT_SS:
14205 case X86_XCPT_PF:
14206                case X86_XCPT_AC:
                case X86_XCPT_GP:   /* also pushes an error code */
14207 fFlags |= IEM_XCPT_FLAGS_ERR;
14208 break;
14209
14210 case X86_XCPT_NMI:
14211 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14212 break;
14213 }
14214 break;
14215
14216 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14217 }
14218
14219 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14220
14221 if (pVCpu->iem.s.cActiveMappings > 0)
14222 iemMemRollback(pVCpu);
14223
14224 return rcStrict;
14225}
14226
14227
14228/**
14229 * Injects the active TRPM event.
14230 *
14231 * @returns Strict VBox status code.
14232 * @param pVCpu The cross context virtual CPU structure.
14233 */
14234VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14235{
14236#ifndef IEM_IMPLEMENTS_TASKSWITCH
14237 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14238#else
14239 uint8_t u8TrapNo;
14240 TRPMEVENT enmType;
14241 RTGCUINT uErrCode;
14242 RTGCUINTPTR uCr2;
14243 uint8_t cbInstr;
14244 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14245 if (RT_FAILURE(rc))
14246 return rc;
14247
14248 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14249# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14250 if (rcStrict == VINF_SVM_VMEXIT)
14251 rcStrict = VINF_SUCCESS;
14252# endif
14253
14254 /** @todo Are there any other codes that imply the event was successfully
14255 * delivered to the guest? See @bugref{6607}. */
14256 if ( rcStrict == VINF_SUCCESS
14257 || rcStrict == VINF_IEM_RAISED_XCPT)
14258 TRPMResetTrap(pVCpu);
14259
14260 return rcStrict;
14261#endif
14262}
14263
14264
14265VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14266{
14267 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14268 return VERR_NOT_IMPLEMENTED;
14269}
14270
14271
14272VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14273{
14274 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14275 return VERR_NOT_IMPLEMENTED;
14276}
14277
14278
14279#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14280/**
14281 * Executes an IRET instruction with default operand size.
14282 *
14283 * This is for PATM.
14284 *
14285 * @returns VBox status code.
14286 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14287 * @param pCtxCore The register frame.
14288 */
14289VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14290{
14291 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14292
14293 iemCtxCoreToCtx(pCtx, pCtxCore);
14294 iemInitDecoder(pVCpu);
14295 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14296 if (rcStrict == VINF_SUCCESS)
14297 iemCtxToCtxCore(pCtxCore, pCtx);
14298 else
14299 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14300 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14301 return rcStrict;
14302}
14303#endif
14304
14305
14306/**
14307 * Macro used by the IEMExec* method to check the given instruction length.
14308 * Macro used by the IEMExec* methods to check the given instruction length.
14309 * Will return on failure!
14310 *
14311 * @param a_cbInstr The given instruction length.
14312 * @param a_cbMin The minimum length.
14313 */
14314#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14315 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14316 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
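/*
 * Illustrative sketch (not part of the build): the single unsigned comparison
 * above folds the two checks a_cbMin <= a_cbInstr and a_cbInstr <= 15 (the
 * maximum x86 instruction length) into one, because values below a_cbMin
 * underflow to huge unsigned numbers.  The sample values are assumptions.
 */
#if 0
AssertCompile(  (unsigned)2  - (unsigned)1 <= (unsigned)15 - (unsigned)1);  /* cbInstr=2,  cbMin=1: accepted */
AssertCompile(  (unsigned)15 - (unsigned)1 <= (unsigned)15 - (unsigned)1);  /* cbInstr=15, cbMin=1: accepted */
AssertCompile(!((unsigned)0  - (unsigned)1 <= (unsigned)15 - (unsigned)1)); /* cbInstr=0:  underflows, rejected */
AssertCompile(!((unsigned)16 - (unsigned)1 <= (unsigned)15 - (unsigned)1)); /* cbInstr=16: too long, rejected */
#endif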
14317
14318
14319/**
14320 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14321 *
14322 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14323 *
14324 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14326 * @param rcStrict The status code to fiddle.
14327 */
14328DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14329{
14330 iemUninitExec(pVCpu);
14331#ifdef IN_RC
14332 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14333#else
14334 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14335#endif
14336}
14337
14338
14339/**
14340 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14341 *
14342 * This API ASSUMES that the caller has already verified that the guest code is
14343 * allowed to access the I/O port. (The I/O port is in the DX register in the
14344 * guest state.)
14345 *
14346 * @returns Strict VBox status code.
14347 * @param pVCpu The cross context virtual CPU structure.
14348 * @param cbValue The size of the I/O port access (1, 2, or 4).
14349 * @param enmAddrMode The addressing mode.
14350 * @param fRepPrefix Indicates whether a repeat prefix is used
14351 * (doesn't matter which for this instruction).
14352 * @param cbInstr The instruction length in bytes.
14353 * @param iEffSeg The effective segment register (X86_SREG_XXX index).
14354 * @param fIoChecked Whether the access to the I/O port has been
14355 * checked or not. It's typically checked in the
14356 * HM scenario.
14357 */
14358VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14359 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14360{
14361 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14362 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14363
14364 /*
14365 * State init.
14366 */
14367 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14368
14369 /*
14370 * Switch orgy for getting to the right handler.
14371 */
14372 VBOXSTRICTRC rcStrict;
14373 if (fRepPrefix)
14374 {
14375 switch (enmAddrMode)
14376 {
14377 case IEMMODE_16BIT:
14378 switch (cbValue)
14379 {
14380 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14381 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14382 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14383 default:
14384 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14385 }
14386 break;
14387
14388 case IEMMODE_32BIT:
14389 switch (cbValue)
14390 {
14391 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14392 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14393 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14394 default:
14395 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14396 }
14397 break;
14398
14399 case IEMMODE_64BIT:
14400 switch (cbValue)
14401 {
14402 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14403 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14404 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14405 default:
14406 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14407 }
14408 break;
14409
14410 default:
14411 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14412 }
14413 }
14414 else
14415 {
14416 switch (enmAddrMode)
14417 {
14418 case IEMMODE_16BIT:
14419 switch (cbValue)
14420 {
14421 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14422 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14423 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14424 default:
14425 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14426 }
14427 break;
14428
14429 case IEMMODE_32BIT:
14430 switch (cbValue)
14431 {
14432 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14433 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14434 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14435 default:
14436 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14437 }
14438 break;
14439
14440 case IEMMODE_64BIT:
14441 switch (cbValue)
14442 {
14443 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14444 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14445 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14446 default:
14447 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14448 }
14449 break;
14450
14451 default:
14452 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14453 }
14454 }
14455
14456 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14457}
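/*
 * Illustrative sketch (not part of the build): how an HM exit handler might
 * forward a REP OUTSB (DS:rSI to the port in DX) to the API above.  The
 * concrete argument values are assumptions.
 */
#if 0
    VBOXSTRICTRC rcStrict2 = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
                                                  true /*fRepPrefix*/, cbInstr,
                                                  X86_SREG_DS, true /*fIoChecked*/);
#endif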
14458
14459
14460/**
14461 * Interface for HM and EM for executing string I/O IN (read) instructions.
14462 *
14463 * This API ASSUMES that the caller has already verified that the guest code is
14464 * allowed to access the I/O port. (The I/O port is in the DX register in the
14465 * guest state.)
14466 *
14467 * @returns Strict VBox status code.
14468 * @param pVCpu The cross context virtual CPU structure.
14469 * @param cbValue The size of the I/O port access (1, 2, or 4).
14470 * @param enmAddrMode The addressing mode.
14471 * @param fRepPrefix Indicates whether a repeat prefix is used
14472 * (doesn't matter which for this instruction).
14473 * @param cbInstr The instruction length in bytes.
14474 * @param fIoChecked Whether the access to the I/O port has been
14475 * checked or not. It's typically checked in the
14476 * HM scenario.
14477 */
14478VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14479 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14480{
14481 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14482
14483 /*
14484 * State init.
14485 */
14486 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14487
14488 /*
14489 * Switch orgy for getting to the right handler.
14490 */
14491 VBOXSTRICTRC rcStrict;
14492 if (fRepPrefix)
14493 {
14494 switch (enmAddrMode)
14495 {
14496 case IEMMODE_16BIT:
14497 switch (cbValue)
14498 {
14499 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14500 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14501 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14502 default:
14503 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14504 }
14505 break;
14506
14507 case IEMMODE_32BIT:
14508 switch (cbValue)
14509 {
14510 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14511 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14512 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14513 default:
14514 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14515 }
14516 break;
14517
14518 case IEMMODE_64BIT:
14519 switch (cbValue)
14520 {
14521 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14522 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14523 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14524 default:
14525 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14526 }
14527 break;
14528
14529 default:
14530 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14531 }
14532 }
14533 else
14534 {
14535 switch (enmAddrMode)
14536 {
14537 case IEMMODE_16BIT:
14538 switch (cbValue)
14539 {
14540 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14541 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14542 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14543 default:
14544 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14545 }
14546 break;
14547
14548 case IEMMODE_32BIT:
14549 switch (cbValue)
14550 {
14551 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14552 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14553 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14554 default:
14555 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14556 }
14557 break;
14558
14559 case IEMMODE_64BIT:
14560 switch (cbValue)
14561 {
14562 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14563 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14564 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14565 default:
14566 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14567 }
14568 break;
14569
14570 default:
14571 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14572 }
14573 }
14574
14575 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14576}
14577
14578
14579/**
14580 * Interface for rawmode to execute an OUT (write) instruction.
14581 *
14582 * @returns Strict VBox status code.
14583 * @param pVCpu The cross context virtual CPU structure.
14584 * @param cbInstr The instruction length in bytes.
14585 * @param u16Port The port to write to.
14586 * @param cbReg The register size.
14587 *
14588 * @remarks In ring-0 not all of the state needs to be synced in.
14589 */
14590VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14591{
14592 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14593 Assert(cbReg <= 4 && cbReg != 3);
14594
14595 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14596 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14597 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14598}
14599
14600
14601/**
14602 * Interface for rawmode to execute an IN (read) instruction.
14603 *
14604 * @returns Strict VBox status code.
14605 * @param pVCpu The cross context virtual CPU structure.
14606 * @param cbInstr The instruction length in bytes.
14607 * @param u16Port The port to read.
14608 * @param cbReg The register size.
14609 */
14610VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14611{
14612 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14613 Assert(cbReg <= 4 && cbReg != 3);
14614
14615 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14616 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14617 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14618}
14619
14620
14621/**
14622 * Interface for HM and EM to write to a CRx register.
14623 *
14624 * @returns Strict VBox status code.
14625 * @param pVCpu The cross context virtual CPU structure.
14626 * @param cbInstr The instruction length in bytes.
14627 * @param iCrReg The control register number (destination).
14628 * @param iGReg The general purpose register number (source).
14629 *
14630 * @remarks In ring-0 not all of the state needs to be synced in.
14631 */
14632VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14633{
14634 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14635 Assert(iCrReg < 16);
14636 Assert(iGReg < 16);
14637
14638 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14639 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14640 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14641}
14642
14643
14644/**
14645 * Interface for HM and EM to read from a CRx register.
14646 *
14647 * @returns Strict VBox status code.
14648 * @param pVCpu The cross context virtual CPU structure.
14649 * @param cbInstr The instruction length in bytes.
14650 * @param iGReg The general purpose register number (destination).
14651 * @param iCrReg The control register number (source).
14652 *
14653 * @remarks In ring-0 not all of the state needs to be synced in.
14654 */
14655VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14656{
14657 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14658 Assert(iCrReg < 16);
14659 Assert(iGReg < 16);
14660
14661 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14662 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14663 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14664}
14665
14666
14667/**
14668 * Interface for HM and EM to clear the CR0[TS] bit.
14669 *
14670 * @returns Strict VBox status code.
14671 * @param pVCpu The cross context virtual CPU structure.
14672 * @param cbInstr The instruction length in bytes.
14673 *
14674 * @remarks In ring-0 not all of the state needs to be synced in.
14675 */
14676VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14677{
14678 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14679
14680 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14681 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14682 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14683}
14684
14685
14686/**
14687 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14688 *
14689 * @returns Strict VBox status code.
14690 * @param pVCpu The cross context virtual CPU structure.
14691 * @param cbInstr The instruction length in bytes.
14692 * @param uValue The value to load into CR0.
14693 *
14694 * @remarks In ring-0 not all of the state needs to be synced in.
14695 */
14696VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14697{
14698 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14699
14700 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14701 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14702 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14703}
14704
14705
14706/**
14707 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14708 *
14709 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14710 *
14711 * @returns Strict VBox status code.
14712 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14713 * @param cbInstr The instruction length in bytes.
14714 * @remarks In ring-0 not all of the state needs to be synced in.
14715 * @thread EMT(pVCpu)
14716 */
14717VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14718{
14719 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14720
14721 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14722 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14723 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14724}
14725
14726
14727/**
14728 * Interface for HM and EM to emulate the INVLPG instruction.
14729 *
 * @returns Strict VBox status code.
14730 * @param pVCpu The cross context virtual CPU structure.
14731 * @param cbInstr The instruction length in bytes.
14732 * @param GCPtrPage The effective address of the page to invalidate.
14733 *
14734 * @remarks In ring-0 not all of the state needs to be synced in.
14735 */
14736VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
14737{
14738 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14739
14740 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14741 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
14742 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14743}
14744
14745
14746/**
14747 * Interface for HM and EM to emulate the INVPCID instruction.
14748 *
 * @returns Strict VBox status code.
14749 * @param pVCpu The cross context virtual CPU structure.
14750 * @param cbInstr The instruction length in bytes.
14751 * @param uType The invalidation type (0 = individual address, 1 = single
 * context, 2 = all contexts incl. globals, 3 = all contexts excl. globals).
14752 * @param GCPtrInvpcidDesc The effective address of the INVPCID descriptor.
14753 *
14754 * @remarks In ring-0 not all of the state needs to be synced in.
14755 */
14756VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
14757{
14758 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
14759
14760 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14761 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
14762 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14763}
14764
14765
14766/**
14767 * Checks if IEM is in the process of delivering an event (interrupt or
14768 * exception).
14769 *
14770 * @returns true if we're in the process of raising an interrupt or exception,
14771 * false otherwise.
14772 * @param pVCpu The cross context virtual CPU structure.
14773 * @param puVector Where to store the vector associated with the
14774 * currently delivered event, optional.
14775 * @param pfFlags Where to store the event delivery flags (see
14776 * IEM_XCPT_FLAGS_XXX), optional.
14777 * @param puErr Where to store the error code associated with the
14778 * event, optional.
14779 * @param puCr2 Where to store the CR2 associated with the event,
14780 * optional.
14781 * @remarks The caller should check the flags to determine if the error code and
14782 * CR2 are valid for the event.
14783 */
14784VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
14785{
14786 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
14787 if (fRaisingXcpt)
14788 {
14789 if (puVector)
14790 *puVector = pVCpu->iem.s.uCurXcpt;
14791 if (pfFlags)
14792 *pfFlags = pVCpu->iem.s.fCurXcpt;
14793 if (puErr)
14794 *puErr = pVCpu->iem.s.uCurXcptErr;
14795 if (puCr2)
14796 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
14797 }
14798 return fRaisingXcpt;
14799}
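
/*
 * A minimal usage sketch for IEMGetCurrentXcpt, assuming a caller that only
 * consumes the error code and CR2 when the delivery flags mark them as valid
 * (IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2); everything else is illustrative.
 */
#if 0 /* illustrative sketch */
static void sketchLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        if (fFlags & IEM_XCPT_FLAGS_ERR)   /* error code is only valid with this flag */
            Log(("Delivering xcpt %#x errcd=%#x\n", uVector, uErr));
        else
            Log(("Delivering xcpt %#x\n", uVector));
        if (fFlags & IEM_XCPT_FLAGS_CR2)   /* CR2 is only valid for #PF delivery */
            Log(("  cr2=%RX64\n", uCr2));
    }
}
#endif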
14800
14801#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14802
14803/**
14804 * Interface for HM and EM to emulate the CLGI instruction.
14805 *
14806 * @returns Strict VBox status code.
14807 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14808 * @param cbInstr The instruction length in bytes.
14809 * @thread EMT(pVCpu)
14810 */
14811VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
14812{
14813 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14814
14815 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14816 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
14817 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14818}
14819
14820
14821/**
14822 * Interface for HM and EM to emulate the STGI instruction.
14823 *
14824 * @returns Strict VBox status code.
14825 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14826 * @param cbInstr The instruction length in bytes.
14827 * @thread EMT(pVCpu)
14828 */
14829VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
14830{
14831 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14832
14833 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14834 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
14835 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14836}
14837
14838
14839/**
14840 * Interface for HM and EM to emulate the VMLOAD instruction.
14841 *
14842 * @returns Strict VBox status code.
14843 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14844 * @param cbInstr The instruction length in bytes.
14845 * @thread EMT(pVCpu)
14846 */
14847VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
14848{
14849 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14850
14851 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14852 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
14853 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14854}
14855
14856
14857/**
14858 * Interface for HM and EM to emulate the VMSAVE instruction.
14859 *
14860 * @returns Strict VBox status code.
14861 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14862 * @param cbInstr The instruction length in bytes.
14863 * @thread EMT(pVCpu)
14864 */
14865VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
14866{
14867 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14868
14869 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14870 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
14871 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14872}
14873
14874
14875/**
14876 * Interface for HM and EM to emulate the INVLPGA instruction.
14877 *
14878 * @returns Strict VBox status code.
14879 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14880 * @param cbInstr The instruction length in bytes.
14881 * @thread EMT(pVCpu)
14882 */
14883VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
14884{
14885 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14886
14887 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14888 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
14889 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14890}
14891
14892
14893/**
14894 * Interface for HM and EM to emulate the VMRUN instruction.
14895 *
14896 * @returns Strict VBox status code.
14897 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14898 * @param cbInstr The instruction length in bytes.
14899 * @thread EMT(pVCpu)
14900 */
14901VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
14902{
14903 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14904
14905 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14906 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
14907 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14908}
14909
14910
14911/**
14912 * Interface for HM and EM to emulate \#VMEXIT.
14913 *
14914 * @returns Strict VBox status code.
14915 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14916 * @param uExitCode The exit code.
14917 * @param uExitInfo1 The exit info. 1 field.
14918 * @param uExitInfo2 The exit info. 2 field.
14919 * @thread EMT(pVCpu)
14920 */
14921VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
14922{
14923 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
14924 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
14925 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14926}
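
/*
 * A minimal sketch of how a nested-SVM aware intercept handler might use
 * IEMExecSvmVmexit to reflect an intercepted event to the outer guest
 * hypervisor.  The exit code and exit-info values are illustrative
 * (SVM_EXIT_MSR with exit-info-1 = 0 denoting a read); the real caller fills
 * them in from the intercepted event.
 */
#if 0 /* illustrative sketch */
static VBOXSTRICTRC hmSketchReflectMsrReadToNestedGuest(PVMCPU pVCpu)
{
    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_MSR, 0 /*uExitInfo1: read*/, 0 /*uExitInfo2*/);
}
#endif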
14927
14928#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
14929#ifdef IN_RING3
14930
14931/**
14932 * Handles the unlikely and probably fatal merge cases.
14933 *
14934 * @returns Merged status code.
14935 * @param rcStrict Current EM status code.
14936 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14937 * with @a rcStrict.
14938 * @param iMemMap The memory mapping index. For error reporting only.
14939 * @param pVCpu The cross context virtual CPU structure of the calling
14940 * thread, for error reporting only.
14941 */
14942DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14943 unsigned iMemMap, PVMCPU pVCpu)
14944{
14945 if (RT_FAILURE_NP(rcStrict))
14946 return rcStrict;
14947
14948 if (RT_FAILURE_NP(rcStrictCommit))
14949 return rcStrictCommit;
14950
14951 if (rcStrict == rcStrictCommit)
14952 return rcStrictCommit;
14953
14954 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14955 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14956 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14957 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14958 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14959 return VERR_IOM_FF_STATUS_IPE;
14960}
14961
14962
14963/**
14964 * Helper for IOMR3ProcessForceFlag.
14965 *
14966 * @returns Merged status code.
14967 * @param rcStrict Current EM status code.
14968 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14969 * with @a rcStrict.
14970 * @param iMemMap The memory mapping index. For error reporting only.
14971 * @param pVCpu The cross context virtual CPU structure of the calling
14972 * thread, for error reporting only.
14973 */
14974DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14975{
14976 /* Simple. */
14977 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14978 return rcStrictCommit;
14979
14980 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14981 return rcStrict;
14982
14983 /* EM scheduling status codes: the numerically smaller code has the higher priority and wins the merge. */
14984 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14985 && rcStrict <= VINF_EM_LAST))
14986 {
14987 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14988 && rcStrictCommit <= VINF_EM_LAST))
14989 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14990 }
14991
14992 /* Unlikely */
14993 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14994}
14995
14996
14997/**
14998 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14999 *
15000 * @returns Merge between @a rcStrict and what the commit operation returned.
15001 * @param pVM The cross context VM structure.
15002 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15003 * @param rcStrict The status code returned by ring-0 or raw-mode.
15004 */
15005VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15006{
15007 /*
15008 * Reset the pending commit.
15009 */
15010 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15011 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15012 ("%#x %#x %#x\n",
15013 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15014 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15015
15016 /*
15017 * Commit the pending bounce buffers (usually just one).
15018 */
15019 unsigned cBufs = 0;
15020 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15021 while (iMemMap-- > 0)
15022 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15023 {
15024 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15025 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15026 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15027
15028 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15029 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15030 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15031
15032 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15033 {
15034 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15035 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15036 pbBuf,
15037 cbFirst,
15038 PGMACCESSORIGIN_IEM);
15039 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15040 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15041 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15042 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15043 }
15044
15045 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15046 {
15047 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15048 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15049 pbBuf + cbFirst,
15050 cbSecond,
15051 PGMACCESSORIGIN_IEM);
15052 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15053 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15054 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15055 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15056 }
15057 cBufs++;
15058 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15059 }
15060
15061 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15062 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15063 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15064 pVCpu->iem.s.cActiveMappings = 0;
15065 return rcStrict;
15066}
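
/*
 * A minimal sketch of the intended call site for IEMR3ProcessForceFlag,
 * assuming a hypothetical EM loop that has just received rcStrict back from
 * ring-0 or raw-mode; only the force-flag check and the call itself reflect
 * the interface above.
 */
#if 0 /* illustrative sketch */
static VBOXSTRICTRC emSketchProcessIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Commit any pending IEM bounce buffer writes and merge the statuses. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif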
15067
15068#endif /* IN_RING3 */
15069