VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 62490

Last change on this file since 62490 was 62478, checked in by vboxsync, 8 years ago

(C) 2016

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 573.9 KB
Line 
1/* $Id: IEMAll.cpp 62478 2016-07-22 18:29:06Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted exeuction manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much a work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
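/*
 * A minimal sketch of how the levels listed above are typically exercised; the
 * messages are invented for illustration, only the Log/LogFlow/Log4/Log8
 * macros themselves (VBox/log.h, double-parenthesis convention) are real.
 */
#if 0 /* illustration only, not part of the original file */
static void iemExampleLogging(uint64_t uRip, uint64_t uValue)
{
    LogFlow(("iemExampleLogging: enter\n"));                      /* Flow   : enter/exit state info.    */
    Log(("iemExampleLogging: raising #GP(0) at %RX64\n", uRip));  /* Level 1: major events.             */
    Log4(("decode - %RX64: xor eax, eax\n", uRip));               /* Level 4: decoded mnemonics w/ EIP. */
    Log8(("memwrite: at %RX64 value %#RX64\n", uRip, uValue));    /* Level 8: memory writes.            */
}
#endif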
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85
86/*********************************************************************************************************************************
87* Header Files *
88*********************************************************************************************************************************/
89#define LOG_GROUP LOG_GROUP_IEM
90#define VMCPU_INCL_CPUM_GST_CTX
91#include <VBox/vmm/iem.h>
92#include <VBox/vmm/cpum.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/tm.h>
99#include <VBox/vmm/dbgf.h>
100#include <VBox/vmm/dbgftrace.h>
101#ifdef VBOX_WITH_RAW_MODE_NOT_R0
102# include <VBox/vmm/patm.h>
103# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
104# include <VBox/vmm/csam.h>
105# endif
106#endif
107#include "IEMInternal.h"
108#ifdef IEM_VERIFICATION_MODE_FULL
109# include <VBox/vmm/rem.h>
110# include <VBox/vmm/mm.h>
111#endif
112#include <VBox/vmm/vm.h>
113#include <VBox/log.h>
114#include <VBox/err.h>
115#include <VBox/param.h>
116#include <VBox/dis.h>
117#include <VBox/disopcode.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL.
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL_1.
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
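/*
 * A quick sketch of how the FNIEMOP_DEF / FNIEMOP_CALL pair is intended to be
 * used; the handler names and bodies below are invented for illustration, the
 * real decoder functions live in the instruction tables and use the IEM_MC
 * microcode macros rather than returning directly.
 */
#if 0 /* illustration only, not part of the original file */
FNIEMOP_DEF(iemOp_example_nop)
{
    /* A decoder function only receives pVCpu; prefixes, operand size and the
       rest of the decoder state are picked up from pVCpu->iem.s. */
    return VINF_SUCCESS;
}

FNIEMOPRM_DEF(iemOp_example_with_modrm)
{
    /* Handlers needing the ModR/M byte get it as the extra bRm parameter. */
    NOREF(bRm);
    return VINF_SUCCESS;
}

/* Dispatch sites invoke handlers through the matching FNIEMOP_CALL* macro so
   the calling convention can be changed in one place, e.g.:
       return FNIEMOP_CALL(iemOp_example_nop);
       return FNIEMOP_CALL_1(iemOp_example_with_modrm, bRm);                  */
#endif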
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
208
209
210/*********************************************************************************************************************************
211* Defined Constants And Macros *
212*********************************************************************************************************************************/
213/** @def IEM_WITH_SETJMP
214 * Enables alternative status code handling using setjmps.
215 *
216 * This adds a bit of expense via the setjmp() call since it saves all the
217 * non-volatile registers. However, it eliminates return code checks and allows
218 * for more optimal return value passing (return regs instead of stack buffer).
219 */
220#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
221# define IEM_WITH_SETJMP
222#endif
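/*
 * A self-contained sketch (plain C, not VirtualBox code) of the two status
 * handling styles this define switches between: with status codes every
 * helper returns a value the caller must check, with setjmp the helpers
 * longjmp straight back to the dispatcher on failure so the hot path carries
 * no checks and fetched values can be returned in registers.
 */
#if 0 /* illustration only, not part of the original file */
# include <setjmp.h>
# include <stddef.h>

static jmp_buf g_ExampleJmpBuf;

/* setjmp style: the failure path never returns to the caller. */
static unsigned char exampleFetchByteJmp(const unsigned char *pb, size_t off, size_t cb)
{
    if (off < cb)
        return pb[off];
    longjmp(g_ExampleJmpBuf, -1);       /* corresponds to raising #GP/#PF */
}

static int exampleExecuteOne(const unsigned char *pb, size_t cb)
{
    int rc = setjmp(g_ExampleJmpBuf);   /* saves the non-volatile registers once */
    if (rc == 0)
    {
        unsigned char bOpcode = exampleFetchByteJmp(pb, 0, cb); /* no status check */
        return bOpcode;                 /* decoding would continue here */
    }
    return rc;                          /* the exception path lands here */
}
#endif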
223
224/** Temporary hack to disable the double execution. Will be removed in favor
225 * of a dedicated execution mode in EM. */
226//#define IEM_VERIFICATION_MODE_NO_REM
227
228/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
229 * due to GCC lacking knowledge about the value range of a switch. */
230#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
231
232/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
233#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
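/*
 * An illustrative use of the two default-case macros above; the switch itself
 * is invented for the example, but the pattern matches how they are used
 * throughout this file.
 */
#if 0 /* illustration only, not part of the original file */
static uint8_t iemExampleOpSizeInBytes(IEMMODE enmEffOpSize)
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT: return 2;
        case IEMMODE_32BIT: return 4;
        case IEMMODE_64BIT: return 8;
        IEM_NOT_REACHED_DEFAULT_CASE_RET2(UINT8_MAX); /* shuts up GCC, asserts in strict builds */
    }
}
#endif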
234
235/**
236 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
237 * occasion.
238 */
239#ifdef LOG_ENABLED
240# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
241 do { \
242 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
243 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
244 } while (0)
245#else
246# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
247 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
248#endif
249
250/**
251 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
252 * occasion using the supplied logger statement.
253 *
254 * @param a_LoggerArgs What to log on failure.
255 */
256#ifdef LOG_ENABLED
257# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
258 do { \
259 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
260 /*LogFunc(a_LoggerArgs);*/ \
261 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
262 } while (0)
263#else
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
266#endif
267
268/**
269 * Call an opcode decoder function.
270 *
271 * We're using macros for this so that adding and removing parameters can be
272 * done as we please. See FNIEMOP_DEF.
273 */
274#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
275
276/**
277 * Call a common opcode decoder function taking one extra argument.
278 *
279 * We're using macros for this so that adding and removing parameters can be
280 * done as we please. See FNIEMOP_DEF_1.
281 */
282#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
283
284/**
285 * Call a common opcode decoder function taking two extra arguments.
286 *
287 * We're using macros for this so that adding and removing parameters can be
288 * done as we please. See FNIEMOP_DEF_2.
289 */
290#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
291
292/**
293 * Check if we're currently executing in real or virtual 8086 mode.
294 *
295 * @returns @c true if it is, @c false if not.
296 * @param a_pVCpu The IEM state of the current CPU.
297 */
298#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
299
300/**
301 * Check if we're currently executing in virtual 8086 mode.
302 *
303 * @returns @c true if it is, @c false if not.
304 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
305 */
306#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
307
308/**
309 * Check if we're currently executing in long mode.
310 *
311 * @returns @c true if it is, @c false if not.
312 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
313 */
314#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
315
316/**
317 * Check if we're currently executing in real mode.
318 *
319 * @returns @c true if it is, @c false if not.
320 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
321 */
322#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
323
324/**
325 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
326 * @returns PCCPUMFEATURES
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
330
331/**
332 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
333 * @returns PCCPUMFEATURES
334 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
335 */
336#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
337
338/**
339 * Evaluates to true if we're presenting an Intel CPU to the guest.
340 */
341#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
342
343/**
344 * Evaluates to true if we're presenting an AMD CPU to the guest.
345 */
346#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
347
348/**
349 * Check if the address is canonical.
350 */
351#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
352
353/** @def IEM_USE_UNALIGNED_DATA_ACCESS
354 * Use unaligned accesses instead of elaborate byte assembly. */
355#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
356# define IEM_USE_UNALIGNED_DATA_ACCESS
357#endif
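/*
 * A self-contained sketch of what this define is about: on x86/AMD64 a
 * misaligned guest dword can simply be loaded through a pointer cast, while
 * strict-alignment hosts must assemble it byte by byte.
 */
#if 0 /* illustration only, not part of the original file */
static uint32_t iemExampleReadU32(const uint8_t *pb)
{
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    return *(const uint32_t *)pb;      /* single unaligned load */
# else
    return (uint32_t)pb[0]             /* elaborate byte assembly */
         | ((uint32_t)pb[1] <<  8)
         | ((uint32_t)pb[2] << 16)
         | ((uint32_t)pb[3] << 24);
# endif
}
#endif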
358
359
360/*********************************************************************************************************************************
361* Global Variables *
362*********************************************************************************************************************************/
363extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
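/*
 * A simplified sketch of how the one-byte opcode map is consumed: decoding an
 * instruction boils down to fetching the opcode byte and making an indirect
 * call through this 256-entry table.  The fetch step is elided here; the real
 * loop uses the opcode fetch macros and handles prefixes, faults and the code
 * TLB.
 */
#if 0 /* illustration only, not part of the original file */
static VBOXSTRICTRC iemExampleDispatchOneByte(PVMCPU pVCpu, uint8_t bOpcode)
{
    return FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
}
#endif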
364
365
366/** Function table for the ADD instruction. */
367IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
368{
369 iemAImpl_add_u8, iemAImpl_add_u8_locked,
370 iemAImpl_add_u16, iemAImpl_add_u16_locked,
371 iemAImpl_add_u32, iemAImpl_add_u32_locked,
372 iemAImpl_add_u64, iemAImpl_add_u64_locked
373};
374
375/** Function table for the ADC instruction. */
376IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
377{
378 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
379 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
380 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
381 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
382};
383
384/** Function table for the SUB instruction. */
385IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
386{
387 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
388 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
389 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
390 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
391};
392
393/** Function table for the SBB instruction. */
394IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
395{
396 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
397 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
398 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
399 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
400};
401
402/** Function table for the OR instruction. */
403IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
404{
405 iemAImpl_or_u8, iemAImpl_or_u8_locked,
406 iemAImpl_or_u16, iemAImpl_or_u16_locked,
407 iemAImpl_or_u32, iemAImpl_or_u32_locked,
408 iemAImpl_or_u64, iemAImpl_or_u64_locked
409};
410
411/** Function table for the XOR instruction. */
412IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
413{
414 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
415 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
416 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
417 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
418};
419
420/** Function table for the AND instruction. */
421IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
422{
423 iemAImpl_and_u8, iemAImpl_and_u8_locked,
424 iemAImpl_and_u16, iemAImpl_and_u16_locked,
425 iemAImpl_and_u32, iemAImpl_and_u32_locked,
426 iemAImpl_and_u64, iemAImpl_and_u64_locked
427};
428
429/** Function table for the CMP instruction.
430 * @remarks Making operand order ASSUMPTIONS.
431 */
432IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
433{
434 iemAImpl_cmp_u8, NULL,
435 iemAImpl_cmp_u16, NULL,
436 iemAImpl_cmp_u32, NULL,
437 iemAImpl_cmp_u64, NULL
438};
439
440/** Function table for the TEST instruction.
441 * @remarks Making operand order ASSUMPTIONS.
442 */
443IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
444{
445 iemAImpl_test_u8, NULL,
446 iemAImpl_test_u16, NULL,
447 iemAImpl_test_u32, NULL,
448 iemAImpl_test_u64, NULL
449};
450
451/** Function table for the BT instruction. */
452IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
453{
454 NULL, NULL,
455 iemAImpl_bt_u16, NULL,
456 iemAImpl_bt_u32, NULL,
457 iemAImpl_bt_u64, NULL
458};
459
460/** Function table for the BTC instruction. */
461IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
462{
463 NULL, NULL,
464 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
465 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
466 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
467};
468
469/** Function table for the BTR instruction. */
470IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
471{
472 NULL, NULL,
473 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
474 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
475 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
476};
477
478/** Function table for the BTS instruction. */
479IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
480{
481 NULL, NULL,
482 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
483 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
484 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
485};
486
487/** Function table for the BSF instruction. */
488IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
489{
490 NULL, NULL,
491 iemAImpl_bsf_u16, NULL,
492 iemAImpl_bsf_u32, NULL,
493 iemAImpl_bsf_u64, NULL
494};
495
496/** Function table for the BSR instruction. */
497IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
498{
499 NULL, NULL,
500 iemAImpl_bsr_u16, NULL,
501 iemAImpl_bsr_u32, NULL,
502 iemAImpl_bsr_u64, NULL
503};
504
505/** Function table for the IMUL instruction. */
506IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
507{
508 NULL, NULL,
509 iemAImpl_imul_two_u16, NULL,
510 iemAImpl_imul_two_u32, NULL,
511 iemAImpl_imul_two_u64, NULL
512};
513
514/** Group 1 /r lookup table. */
515IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
516{
517 &g_iemAImpl_add,
518 &g_iemAImpl_or,
519 &g_iemAImpl_adc,
520 &g_iemAImpl_sbb,
521 &g_iemAImpl_and,
522 &g_iemAImpl_sub,
523 &g_iemAImpl_xor,
524 &g_iemAImpl_cmp
525};
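/*
 * A sketch of how the group 1 table above is indexed: for the 0x80..0x83
 * encodings the reg (/r) field of the ModR/M byte, bits 5:3, selects which of
 * the eight ALU operations to perform.  The helper name is invented.
 */
#if 0 /* illustration only, not part of the original file */
static PCIEMOPBINSIZES iemExampleLookupGrp1(uint8_t bRm)
{
    return g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
}
#endif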
526
527/** Function table for the INC instruction. */
528IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
529{
530 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
531 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
532 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
533 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
534};
535
536/** Function table for the DEC instruction. */
537IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
538{
539 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
540 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
541 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
542 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
543};
544
545/** Function table for the NEG instruction. */
546IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
547{
548 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
549 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
550 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
551 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
552};
553
554/** Function table for the NOT instruction. */
555IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
556{
557 iemAImpl_not_u8, iemAImpl_not_u8_locked,
558 iemAImpl_not_u16, iemAImpl_not_u16_locked,
559 iemAImpl_not_u32, iemAImpl_not_u32_locked,
560 iemAImpl_not_u64, iemAImpl_not_u64_locked
561};
562
563
564/** Function table for the ROL instruction. */
565IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
566{
567 iemAImpl_rol_u8,
568 iemAImpl_rol_u16,
569 iemAImpl_rol_u32,
570 iemAImpl_rol_u64
571};
572
573/** Function table for the ROR instruction. */
574IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
575{
576 iemAImpl_ror_u8,
577 iemAImpl_ror_u16,
578 iemAImpl_ror_u32,
579 iemAImpl_ror_u64
580};
581
582/** Function table for the RCL instruction. */
583IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
584{
585 iemAImpl_rcl_u8,
586 iemAImpl_rcl_u16,
587 iemAImpl_rcl_u32,
588 iemAImpl_rcl_u64
589};
590
591/** Function table for the RCR instruction. */
592IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
593{
594 iemAImpl_rcr_u8,
595 iemAImpl_rcr_u16,
596 iemAImpl_rcr_u32,
597 iemAImpl_rcr_u64
598};
599
600/** Function table for the SHL instruction. */
601IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
602{
603 iemAImpl_shl_u8,
604 iemAImpl_shl_u16,
605 iemAImpl_shl_u32,
606 iemAImpl_shl_u64
607};
608
609/** Function table for the SHR instruction. */
610IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
611{
612 iemAImpl_shr_u8,
613 iemAImpl_shr_u16,
614 iemAImpl_shr_u32,
615 iemAImpl_shr_u64
616};
617
618/** Function table for the SAR instruction. */
619IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
620{
621 iemAImpl_sar_u8,
622 iemAImpl_sar_u16,
623 iemAImpl_sar_u32,
624 iemAImpl_sar_u64
625};
626
627
628/** Function table for the MUL instruction. */
629IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
630{
631 iemAImpl_mul_u8,
632 iemAImpl_mul_u16,
633 iemAImpl_mul_u32,
634 iemAImpl_mul_u64
635};
636
637/** Function table for the IMUL instruction working implicitly on rAX. */
638IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
639{
640 iemAImpl_imul_u8,
641 iemAImpl_imul_u16,
642 iemAImpl_imul_u32,
643 iemAImpl_imul_u64
644};
645
646/** Function table for the DIV instruction. */
647IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
648{
649 iemAImpl_div_u8,
650 iemAImpl_div_u16,
651 iemAImpl_div_u32,
652 iemAImpl_div_u64
653};
654
655/** Function table for the IDIV instruction. */
656IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
657{
658 iemAImpl_idiv_u8,
659 iemAImpl_idiv_u16,
660 iemAImpl_idiv_u32,
661 iemAImpl_idiv_u64
662};
663
664/** Function table for the SHLD instruction */
665IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
666{
667 iemAImpl_shld_u16,
668 iemAImpl_shld_u32,
669 iemAImpl_shld_u64,
670};
671
672/** Function table for the SHRD instruction */
673IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
674{
675 iemAImpl_shrd_u16,
676 iemAImpl_shrd_u32,
677 iemAImpl_shrd_u64,
678};
679
680
681/** Function table for the PUNPCKLBW instruction */
682IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
683/** Function table for the PUNPCKLWD instruction */
684IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
685/** Function table for the PUNPCKLDQ instruction */
686IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
687/** Function table for the PUNPCKLQDQ instruction */
688IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
689
690/** Function table for the PUNPCKHBW instruction */
691IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
692/** Function table for the PUNPCKHWD instruction */
693IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
694/** Function table for the PUNPCKHDQ instruction */
695IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
696/** Function table for the PUNPCKHQDQ instruction */
697IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
698
699/** Function table for the PXOR instruction */
700IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
701/** Function table for the PCMPEQB instruction */
702IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
703/** Function table for the PCMPEQW instruction */
704IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
705/** Function table for the PCMPEQD instruction */
706IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
707
708
709#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
710/** What IEM just wrote. */
711uint8_t g_abIemWrote[256];
712/** How much IEM just wrote. */
713size_t g_cbIemWrote;
714#endif
715
716
717/*********************************************************************************************************************************
718* Internal Functions *
719*********************************************************************************************************************************/
720IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
721IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
722IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
723IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
724/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
725IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
726IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
727IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
728IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
729IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
730IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
731IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
733IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
734IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
735IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
736IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
737#ifdef IEM_WITH_SETJMP
738DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
739DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
740DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
741DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
742DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
743#endif
744
745IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
746IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
747IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
748IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
749IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
750IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
751IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
752IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
753IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
754IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
755IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
756IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
757IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
758IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
759IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
760IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
761
762#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
763IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
764#endif
765IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
766IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
767
768
769
770/**
771 * Sets the pass up status.
772 *
773 * @returns VINF_SUCCESS.
774 * @param pVCpu The cross context virtual CPU structure of the
775 * calling thread.
776 * @param rcPassUp The pass up status. Must be informational.
777 * VINF_SUCCESS is not allowed.
778 */
779IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
780{
781 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
782
783 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
784 if (rcOldPassUp == VINF_SUCCESS)
785 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
786 /* If both are EM scheduling codes, use EM priority rules. */
787 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
788 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
789 {
790 if (rcPassUp < rcOldPassUp)
791 {
792 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
793 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
794 }
795 else
796 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
797 }
798 /* Override EM scheduling with specific status code. */
799 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
800 {
801 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
802 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
803 }
804 /* Don't override specific status code, first come first served. */
805 else
806 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
807 return VINF_SUCCESS;
808}
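/*
 * A short illustration of the merging rules implemented above, using invented
 * example calls: the first informational status is accepted, two EM scheduling
 * codes are merged by EM priority (the numerically lower one wins), an EM code
 * is overridden by any specific status, and otherwise it is first come, first
 * served.
 */
#if 0 /* illustration only, not part of the original file */
static void iemExamplePassUp(PVMCPU pVCpu)
{
    iemSetPassUpStatus(pVCpu, VINF_EM_RESCHEDULE);  /* accepted, nothing pending yet     */
    iemSetPassUpStatus(pVCpu, VINF_EM_RAW_TO_R3);   /* both EM codes: lower value wins   */
    iemSetPassUpStatus(pVCpu, VINF_PGM_SYNC_CR3);   /* specific status overrides EM code */
}
#endif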
809
810
811/**
812 * Calculates the CPU mode.
813 *
814 * This is mainly for updating IEMCPU::enmCpuMode.
815 *
816 * @returns CPU mode.
817 * @param pCtx The register context for the CPU.
818 */
819DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
820{
821 if (CPUMIsGuestIn64BitCodeEx(pCtx))
822 return IEMMODE_64BIT;
823 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
824 return IEMMODE_32BIT;
825 return IEMMODE_16BIT;
826}
827
828
829/**
830 * Initializes the execution state.
831 *
832 * @param pVCpu The cross context virtual CPU structure of the
833 * calling thread.
834 * @param fBypassHandlers Whether to bypass access handlers.
835 *
836 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
837 * side-effects in strict builds.
838 */
839DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
840{
841 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
842
843 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
844
845#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
851 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
852 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
853 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
854#endif
855
856#ifdef VBOX_WITH_RAW_MODE_NOT_R0
857 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
858#endif
859 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
860 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
861#ifdef VBOX_STRICT
862 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xc0fe;
863 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xc0fe;
864 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xc0fe;
865 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xc0fe;
866 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
867 pVCpu->iem.s.uRexReg = 127;
868 pVCpu->iem.s.uRexB = 127;
869 pVCpu->iem.s.uRexIndex = 127;
870 pVCpu->iem.s.iEffSeg = 127;
871 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
872# ifdef IEM_WITH_CODE_TLB
873 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
874 pVCpu->iem.s.pbInstrBuf = NULL;
875 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
876 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
877 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
878 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
879# else
880 pVCpu->iem.s.offOpcode = 127;
881 pVCpu->iem.s.cbOpcode = 127;
882# endif
883#endif
884
885 pVCpu->iem.s.cActiveMappings = 0;
886 pVCpu->iem.s.iNextMapping = 0;
887 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
888 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
889#ifdef VBOX_WITH_RAW_MODE_NOT_R0
890 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
891 && pCtx->cs.u64Base == 0
892 && pCtx->cs.u32Limit == UINT32_MAX
893 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
894 if (!pVCpu->iem.s.fInPatchCode)
895 CPUMRawLeave(pVCpu, VINF_SUCCESS);
896#endif
897
898#ifdef IEM_VERIFICATION_MODE_FULL
899 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
900 pVCpu->iem.s.fNoRem = true;
901#endif
902}
903
904
905/**
906 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
907 *
908 * @param pVCpu The cross context virtual CPU structure of the
909 * calling thread.
910 */
911DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
912{
913 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
914#ifdef IEM_VERIFICATION_MODE_FULL
915 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
916#endif
917#ifdef VBOX_STRICT
918# ifdef IEM_WITH_CODE_TLB
919# else
920 pVCpu->iem.s.cbOpcode = 0;
921# endif
922#else
923 NOREF(pVCpu);
924#endif
925}
926
927
928/**
929 * Initializes the decoder state.
930 *
931 * iemReInitDecoder is mostly a copy of this function.
932 *
933 * @param pVCpu The cross context virtual CPU structure of the
934 * calling thread.
935 * @param fBypassHandlers Whether to bypass access handlers.
936 */
937DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
938{
939 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
940
941 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
942
943#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
944 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
945 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
946 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
947 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
948 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
949 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
950 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
951 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
952#endif
953
954#ifdef VBOX_WITH_RAW_MODE_NOT_R0
955 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
956#endif
957 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
958#ifdef IEM_VERIFICATION_MODE_FULL
959 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
960 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
961#endif
962 IEMMODE enmMode = iemCalcCpuMode(pCtx);
963 pVCpu->iem.s.enmCpuMode = enmMode;
964 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
965 pVCpu->iem.s.enmEffAddrMode = enmMode;
966 if (enmMode != IEMMODE_64BIT)
967 {
968 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
969 pVCpu->iem.s.enmEffOpSize = enmMode;
970 }
971 else
972 {
973 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
974 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
975 }
976 pVCpu->iem.s.fPrefixes = 0;
977 pVCpu->iem.s.uRexReg = 0;
978 pVCpu->iem.s.uRexB = 0;
979 pVCpu->iem.s.uRexIndex = 0;
980 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
981#ifdef IEM_WITH_CODE_TLB
982 pVCpu->iem.s.pbInstrBuf = NULL;
983 pVCpu->iem.s.offInstrNextByte = 0;
984 pVCpu->iem.s.offCurInstrStart = 0;
985# ifdef VBOX_STRICT
986 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
987 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
988 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
989# endif
990#else
991 pVCpu->iem.s.offOpcode = 0;
992 pVCpu->iem.s.cbOpcode = 0;
993#endif
994 pVCpu->iem.s.cActiveMappings = 0;
995 pVCpu->iem.s.iNextMapping = 0;
996 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
997 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
998#ifdef VBOX_WITH_RAW_MODE_NOT_R0
999 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1000 && pCtx->cs.u64Base == 0
1001 && pCtx->cs.u32Limit == UINT32_MAX
1002 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1003 if (!pVCpu->iem.s.fInPatchCode)
1004 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1005#endif
1006
1007#ifdef DBGFTRACE_ENABLED
1008 switch (enmMode)
1009 {
1010 case IEMMODE_64BIT:
1011 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1012 break;
1013 case IEMMODE_32BIT:
1014 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1015 break;
1016 case IEMMODE_16BIT:
1017 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1018 break;
1019 }
1020#endif
1021}
1022
1023
1024/**
1025 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1026 *
1027 * This is mostly a copy of iemInitDecoder.
1028 *
1029 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1030 */
1031DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1032{
1033 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1034
1035 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1036
1037#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1041 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1042 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1043 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1044 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1045 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1046#endif
1047
1048 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1049#ifdef IEM_VERIFICATION_MODE_FULL
1050 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1051 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1052#endif
1053 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1054 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1055 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1056 pVCpu->iem.s.enmEffAddrMode = enmMode;
1057 if (enmMode != IEMMODE_64BIT)
1058 {
1059 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1060 pVCpu->iem.s.enmEffOpSize = enmMode;
1061 }
1062 else
1063 {
1064 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1065 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1066 }
1067 pVCpu->iem.s.fPrefixes = 0;
1068 pVCpu->iem.s.uRexReg = 0;
1069 pVCpu->iem.s.uRexB = 0;
1070 pVCpu->iem.s.uRexIndex = 0;
1071 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1072#ifdef IEM_WITH_CODE_TLB
1073 if (pVCpu->iem.s.pbInstrBuf)
1074 {
1075 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1076 - pVCpu->iem.s.uInstrBufPc;
1077 if (off < pVCpu->iem.s.cbInstrBufTotal)
1078 {
1079 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1080 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1081 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1082 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1083 else
1084 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1085 }
1086 else
1087 {
1088 pVCpu->iem.s.pbInstrBuf = NULL;
1089 pVCpu->iem.s.offInstrNextByte = 0;
1090 pVCpu->iem.s.offCurInstrStart = 0;
1091 pVCpu->iem.s.cbInstrBuf = 0;
1092 pVCpu->iem.s.cbInstrBufTotal = 0;
1093 }
1094 }
1095 else
1096 {
1097 pVCpu->iem.s.offInstrNextByte = 0;
1098 pVCpu->iem.s.offCurInstrStart = 0;
1099 pVCpu->iem.s.cbInstrBuf = 0;
1100 pVCpu->iem.s.cbInstrBufTotal = 0;
1101 }
1102#else
1103 pVCpu->iem.s.cbOpcode = 0;
1104 pVCpu->iem.s.offOpcode = 0;
1105#endif
1106 Assert(pVCpu->iem.s.cActiveMappings == 0);
1107 pVCpu->iem.s.iNextMapping = 0;
1108 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1109 Assert(pVCpu->iem.s.fBypassHandlers == false);
1110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1111 if (!pVCpu->iem.s.fInPatchCode)
1112 { /* likely */ }
1113 else
1114 {
1115 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1116 && pCtx->cs.u64Base == 0
1117 && pCtx->cs.u32Limit == UINT32_MAX
1118 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1119 if (!pVCpu->iem.s.fInPatchCode)
1120 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1121 }
1122#endif
1123
1124#ifdef DBGFTRACE_ENABLED
1125 switch (enmMode)
1126 {
1127 case IEMMODE_64BIT:
1128 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1129 break;
1130 case IEMMODE_32BIT:
1131 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1132 break;
1133 case IEMMODE_16BIT:
1134 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1135 break;
1136 }
1137#endif
1138}
1139
1140
1141
1142/**
1143 * Prefetches opcodes the first time we start executing.
1144 *
1145 * @returns Strict VBox status code.
1146 * @param pVCpu The cross context virtual CPU structure of the
1147 * calling thread.
1148 * @param fBypassHandlers Whether to bypass access handlers.
1149 */
1150IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1151{
1152#ifdef IEM_VERIFICATION_MODE_FULL
1153 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1154#endif
1155 iemInitDecoder(pVCpu, fBypassHandlers);
1156
1157#ifdef IEM_WITH_CODE_TLB
1158 /** @todo Do ITLB lookup here. */
1159
1160#else /* !IEM_WITH_CODE_TLB */
1161
1162 /*
1163 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1164 *
1165 * First translate CS:rIP to a physical address.
1166 */
1167 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1168 uint32_t cbToTryRead;
1169 RTGCPTR GCPtrPC;
1170 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1171 {
1172 cbToTryRead = PAGE_SIZE;
1173 GCPtrPC = pCtx->rip;
1174 if (!IEM_IS_CANONICAL(GCPtrPC))
1175 return iemRaiseGeneralProtectionFault0(pVCpu);
1176 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1177 }
1178 else
1179 {
1180 uint32_t GCPtrPC32 = pCtx->eip;
1181 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1182 if (GCPtrPC32 > pCtx->cs.u32Limit)
1183 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1184 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1185 if (!cbToTryRead) /* overflowed */
1186 {
1187 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1188 cbToTryRead = UINT32_MAX;
1189 }
1190 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1191 Assert(GCPtrPC <= UINT32_MAX);
1192 }
1193
1194# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1195 /* Allow interpretation of patch manager code blocks since they can for
1196 instance throw #PFs for perfectly good reasons. */
1197 if (pVCpu->iem.s.fInPatchCode)
1198 {
1199 size_t cbRead = 0;
1200 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1201 AssertRCReturn(rc, rc);
1202 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1203 return VINF_SUCCESS;
1204 }
1205# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1206
1207 RTGCPHYS GCPhys;
1208 uint64_t fFlags;
1209 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1210 if (RT_FAILURE(rc))
1211 {
1212 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1213 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1214 }
1215 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1216 {
1217 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1218 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1219 }
1220 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1221 {
1222 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1223 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1224 }
1225 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1226 /** @todo Check reserved bits and such stuff. PGM is better at doing
1227 * that, so do it when implementing the guest virtual address
1228 * TLB... */
1229
1230# ifdef IEM_VERIFICATION_MODE_FULL
1231 /*
1232 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1233 * instruction.
1234 */
1235 /** @todo optimize this differently by not using PGMPhysRead. */
1236 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1237 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1238 if ( offPrevOpcodes < cbOldOpcodes
1239 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1240 {
1241 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1242 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1243 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1244 pVCpu->iem.s.cbOpcode = cbNew;
1245 return VINF_SUCCESS;
1246 }
1247# endif
1248
1249 /*
1250 * Read the bytes at this address.
1251 */
1252 PVM pVM = pVCpu->CTX_SUFF(pVM);
1253# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1254 size_t cbActual;
1255 if ( PATMIsEnabled(pVM)
1256 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1257 {
1258 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1259 Assert(cbActual > 0);
1260 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1261 }
1262 else
1263# endif
1264 {
1265 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1266 if (cbToTryRead > cbLeftOnPage)
1267 cbToTryRead = cbLeftOnPage;
1268 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1269 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1270
1271 if (!pVCpu->iem.s.fBypassHandlers)
1272 {
1273 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1274 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1275 { /* likely */ }
1276 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1277 {
1278 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1279 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1280 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1281 }
1282 else
1283 {
1284 Log((RT_SUCCESS(rcStrict)
1285 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1286 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1287 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1288 return rcStrict;
1289 }
1290 }
1291 else
1292 {
1293 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1294 if (RT_SUCCESS(rc))
1295 { /* likely */ }
1296 else
1297 {
1298 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1299 GCPtrPC, GCPhys, rc, cbToTryRead));
1300 return rc;
1301 }
1302 }
1303 pVCpu->iem.s.cbOpcode = cbToTryRead;
1304 }
1305#endif /* !IEM_WITH_CODE_TLB */
1306 return VINF_SUCCESS;
1307}
1308
1309
1310/**
1311 * Invalidates the IEM TLBs.
1312 *
1313 * This is called internally as well as by PGM when moving GC mappings.
1314 *
1315 *
1316 * @param pVCpu The cross context virtual CPU structure of the calling
1317 * thread.
1318 * @param fVmm Set when PGM calls us with a remapping.
1319 */
1320VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1321{
1322#ifdef IEM_WITH_CODE_TLB
1323 pVCpu->iem.s.cbInstrBufTotal = 0;
1324 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1325 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1326 { /* very likely */ }
1327 else
1328 {
1329 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1330 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1331 while (i-- > 0)
1332 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1333 }
1334#endif
1335
1336#ifdef IEM_WITH_DATA_TLB
1337 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1338 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1339 { /* very likely */ }
1340 else
1341 {
1342 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1343 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1344 while (i-- > 0)
1345 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1346 }
1347#endif
1348 NOREF(pVCpu); NOREF(fVmm);
1349}
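/*
 * A self-contained sketch (not the real structures) of the revision trick used
 * above: each TLB entry tag is (page number | revision), so bumping the
 * revision makes every stored tag miss at lookup time and flushes the whole
 * TLB in O(1).  Only when the revision field wraps to zero do the 256 entries
 * actually have to be scrubbed.
 */
#if 0 /* illustration only, not part of the original file */
typedef struct EXAMPLETLB
{
    uint64_t uRevision;       /* lives in the bits above the page number */
    uint64_t auTags[256];     /* tag = (GCPtr >> PAGE_SHIFT) | uRevision */
} EXAMPLETLB;

static bool exampleTlbLookupHit(EXAMPLETLB const *pTlb, uint64_t GCPtr)
{
    uint64_t const uTag = (GCPtr >> 12) | pTlb->uRevision;
    return pTlb->auTags[(uint8_t)uTag] == uTag;   /* tags with a stale revision never match */
}

static void exampleTlbFlushAll(EXAMPLETLB *pTlb, uint64_t uRevisionIncr)
{
    pTlb->uRevision += uRevisionIncr;             /* instead of clearing all 256 tags */
}
#endif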
1350
1351
1352/**
1353 * Invalidates a page in the TLBs.
1354 *
1355 * @param pVCpu The cross context virtual CPU structure of the calling
1356 * thread.
1357 * @param GCPtr The address of the page to invalidate
1358 */
1359VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1360{
1361#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1362 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1363 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1364 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1365 uintptr_t idx = (uint8_t)GCPtr;
1366
1367# ifdef IEM_WITH_CODE_TLB
1368 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1369 {
1370 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1371 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1372 pVCpu->iem.s.cbInstrBufTotal = 0;
1373 }
1374# endif
1375
1376# ifdef IEM_WITH_DATA_TLB
1377 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1378 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1379# endif
1380#else
1381 NOREF(pVCpu); NOREF(GCPtr);
1382#endif
1383}
1384
1385
1386/**
1387 * Invalidates the host physical aspects of the IEM TLBs.
1388 *
1389 * This is called internally as well as by PGM when moving GC mappings.
1390 *
1391 * @param pVCpu The cross context virtual CPU structure of the calling
1392 * thread.
1393 */
1394VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1395{
1396#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1397 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1398
1399# ifdef IEM_WITH_CODE_TLB
1400 pVCpu->iem.s.cbInstrBufTotal = 0;
1401# endif
1402 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1403 if (uTlbPhysRev != 0)
1404 {
1405 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1406 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1407 }
1408 else
1409 {
1410 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1411 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1412
1413 unsigned i;
1414# ifdef IEM_WITH_CODE_TLB
1415 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1416 while (i-- > 0)
1417 {
1418 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1419 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1420 }
1421# endif
1422# ifdef IEM_WITH_DATA_TLB
1423 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1424 while (i-- > 0)
1425 {
1426 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1427 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1428 }
1429# endif
1430 }
1431#else
1432 NOREF(pVCpu);
1433#endif
1434}
1435
1436
1437/**
1438 * Invalidates the host physical aspects of the IEM TLBs.
1439 *
1440 * This is called internally as well as by PGM when moving GC mappings.
1441 *
1442 * @param pVM The cross context VM structure.
1443 *
1444 * @remarks Caller holds the PGM lock.
1445 */
1446VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1447{
1448
1449}
1450
1451#ifdef IEM_WITH_CODE_TLB
1452
1453/**
1454 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1455 * failure and jumps.
1456 *
1457 * We end up here for a number of reasons:
1458 * - pbInstrBuf isn't yet initialized.
1459 * - Advancing beyond the buffer boundary (e.g. cross page).
1460 * - Advancing beyond the CS segment limit.
1461 * - Fetching from non-mappable page (e.g. MMIO).
1462 *
1463 * @param pVCpu The cross context virtual CPU structure of the
1464 * calling thread.
1465 * @param pvDst Where to return the bytes.
1466 * @param cbDst Number of bytes to read.
1467 *
1468 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1469 */
1470IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1471{
1472#ifdef IN_RING3
1473//__debugbreak();
1474#else
1475 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1476#endif
1477 for (;;)
1478 {
1479 Assert(cbDst <= 8);
1480 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1481
1482 /*
1483 * We might have a partial buffer match, deal with that first to make the
1484 * rest simpler. This is the first part of the cross page/buffer case.
1485 */
1486 if (pVCpu->iem.s.pbInstrBuf != NULL)
1487 {
1488 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1489 {
1490 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1491 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1492 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1493
1494 cbDst -= cbCopy;
1495 pvDst = (uint8_t *)pvDst + cbCopy;
1496 offBuf += cbCopy;
1497 pVCpu->iem.s.offInstrNextByte += offBuf;
1498 }
1499 }
1500
1501 /*
1502 * Check segment limit, figuring how much we're allowed to access at this point.
1503 *
1504 * We will fault immediately if RIP is past the segment limit / in non-canonical
1505 * territory. If we do continue, there are one or more bytes to read before we
1506 * end up in trouble and we need to do that first before faulting.
1507 */
1508 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1509 RTGCPTR GCPtrFirst;
1510 uint32_t cbMaxRead;
1511 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1512 {
1513 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1514 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1515 { /* likely */ }
1516 else
1517 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1518 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1519 }
1520 else
1521 {
1522 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1523 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1524 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1525 { /* likely */ }
1526 else
1527 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1528 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1529 if (cbMaxRead != 0)
1530 { /* likely */ }
1531 else
1532 {
1533 /* Overflowed because address is 0 and limit is max. */
1534 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1535 cbMaxRead = X86_PAGE_SIZE;
1536 }
1537 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1538 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1539 if (cbMaxRead2 < cbMaxRead)
1540 cbMaxRead = cbMaxRead2;
1541 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1542 }
1543
1544 /*
1545 * Get the TLB entry for this piece of code.
1546 */
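        /* The tag combines the guest-virtual page number (GCPtrFirst >> X86_PAGE_SHIFT)
           with the current TLB revision, which is assumed to occupy bits above the page
           number so that bumping uTlbRevision stales every cached tag at once.  The
           entry index is simply the low 8 bits of the tag, hence the AssertCompile
           below insisting on exactly 256 entries. */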
1547 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1548 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1549 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1550 if (pTlbe->uTag == uTag)
1551 {
1552 /* likely when executing lots of code, otherwise unlikely */
1553# ifdef VBOX_WITH_STATISTICS
1554 pVCpu->iem.s.CodeTlb.cTlbHits++;
1555# endif
1556 }
1557 else
1558 {
1559 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1560# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1561 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1562 {
1563 pTlbe->uTag = uTag;
1564                pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1565                                        | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1566 pTlbe->GCPhys = NIL_RTGCPHYS;
1567 pTlbe->pbMappingR3 = NULL;
1568 }
1569 else
1570# endif
1571 {
1572 RTGCPHYS GCPhys;
1573 uint64_t fFlags;
1574 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1575 if (RT_FAILURE(rc))
1576 {
1577                Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1578 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1579 }
1580
1581 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1582 pTlbe->uTag = uTag;
1583 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1584 pTlbe->GCPhys = GCPhys;
1585 pTlbe->pbMappingR3 = NULL;
1586 }
1587 }
1588
1589 /*
1590 * Check TLB page table level access flags.
1591 */
1592 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1593 {
1594 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1595 {
1596 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1597 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1598 }
1599 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1600 {
1601            Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1602 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1603 }
1604 }
1605
1606# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1607 /*
1608 * Allow interpretation of patch manager code blocks since they can for
1609 * instance throw #PFs for perfectly good reasons.
1610 */
1611 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1612        { /* likely */ }
1613 else
1614 {
1615            /** @todo This could be optimized a little in ring-3 if we liked. */
1616 size_t cbRead = 0;
1617 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1618 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1619 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1620 return;
1621 }
1622# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1623
1624 /*
1625 * Look up the physical page info if necessary.
1626 */
1627 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1628 { /* not necessary */ }
1629 else
1630 {
1631 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1632 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1633 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1634 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1635 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1636 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1637 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1638 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1639 }
1640
1641# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1642 /*
1643     * Try to do a direct read using the pbMappingR3 pointer.
1644 */
1645 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1646 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1647 {
1648 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1649 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1650 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1651 {
1652 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1653 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1654 }
1655 else
1656 {
1657 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1658 Assert(cbInstr < cbMaxRead);
1659 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1660 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1661 }
1662 if (cbDst <= cbMaxRead)
1663 {
1664 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1665 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1666 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1667 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1668 return;
1669 }
1670 pVCpu->iem.s.pbInstrBuf = NULL;
1671
1672 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1673 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1674 }
1675 else
1676# endif
1677#if 0
1678 /*
1679     * If there is no special read handling, we can read a bit more and
1680 * put it in the prefetch buffer.
1681 */
1682 if ( cbDst < cbMaxRead
1683 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1684 {
1685 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1686 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1687 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1688 { /* likely */ }
1689 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1690 {
1691            Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1692                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1693            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1694            AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1695 }
1696 else
1697 {
1698 Log((RT_SUCCESS(rcStrict)
1699 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1700 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1701                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1702 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1703 }
1704 }
1705 /*
1706 * Special read handling, so only read exactly what's needed.
1707 * This is a highly unlikely scenario.
1708 */
1709 else
1710#endif
1711 {
1712 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1713 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1714 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1715 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1716 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1717 { /* likely */ }
1718 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1719 {
1720            Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1721                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1722 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1723 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1724 }
1725 else
1726 {
1727 Log((RT_SUCCESS(rcStrict)
1728                 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1729                 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1730                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1731 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1732 }
1733 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1734 if (cbToRead == cbDst)
1735 return;
1736 }
1737
1738 /*
1739 * More to read, loop.
1740 */
1741 cbDst -= cbMaxRead;
1742 pvDst = (uint8_t *)pvDst + cbMaxRead;
1743 }
1744}
1745
1746#else
1747
1748/**
1749 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1750 * exception if it fails.
1751 *
1752 * @returns Strict VBox status code.
1753 * @param pVCpu The cross context virtual CPU structure of the
1754 * calling thread.
1755 * @param   cbMin               The minimum number of bytes, relative to offOpcode,
1756 *                              that must be read.
1757 */
1758IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1759{
1760 /*
1761 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1762 *
1763 * First translate CS:rIP to a physical address.
1764 */
1765 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1766 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1767 uint32_t cbToTryRead;
1768 RTGCPTR GCPtrNext;
1769 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1770 {
1771 cbToTryRead = PAGE_SIZE;
1772 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1773 if (!IEM_IS_CANONICAL(GCPtrNext))
1774 return iemRaiseGeneralProtectionFault0(pVCpu);
1775 }
1776 else
1777 {
1778 uint32_t GCPtrNext32 = pCtx->eip;
1779 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1780 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1781 if (GCPtrNext32 > pCtx->cs.u32Limit)
1782 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1783 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1784 if (!cbToTryRead) /* overflowed */
1785 {
1786 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1787 cbToTryRead = UINT32_MAX;
1788 /** @todo check out wrapping around the code segment. */
1789 }
1790 if (cbToTryRead < cbMin - cbLeft)
1791 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1792 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1793 }
1794
1795 /* Only read up to the end of the page, and make sure we don't read more
1796 than the opcode buffer can hold. */
1797 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1798 if (cbToTryRead > cbLeftOnPage)
1799 cbToTryRead = cbLeftOnPage;
1800 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1801 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1802/** @todo r=bird: Convert assertion into undefined opcode exception? */
1803 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1804
1805# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1806 /* Allow interpretation of patch manager code blocks since they can for
1807 instance throw #PFs for perfectly good reasons. */
1808 if (pVCpu->iem.s.fInPatchCode)
1809 {
1810 size_t cbRead = 0;
1811 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1812 AssertRCReturn(rc, rc);
1813 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1814 return VINF_SUCCESS;
1815 }
1816# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1817
1818 RTGCPHYS GCPhys;
1819 uint64_t fFlags;
1820 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1821 if (RT_FAILURE(rc))
1822 {
1823 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1824 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1825 }
1826 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1827 {
1828 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1829 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1830 }
1831 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1832 {
1833 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1834 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1835 }
1836 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1837 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1838 /** @todo Check reserved bits and such stuff. PGM is better at doing
1839 * that, so do it when implementing the guest virtual address
1840 * TLB... */
1841
1842 /*
1843 * Read the bytes at this address.
1844 *
1845 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1846 * and since PATM should only patch the start of an instruction there
1847 * should be no need to check again here.
1848 */
1849 if (!pVCpu->iem.s.fBypassHandlers)
1850 {
1851 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1852 cbToTryRead, PGMACCESSORIGIN_IEM);
1853 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1854 { /* likely */ }
1855 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1856 {
1857 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1858                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1859 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1860 }
1861 else
1862 {
1863 Log((RT_SUCCESS(rcStrict)
1864 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1865 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1866                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1867 return rcStrict;
1868 }
1869 }
1870 else
1871 {
1872 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1873 if (RT_SUCCESS(rc))
1874 { /* likely */ }
1875 else
1876 {
1877 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1878 return rc;
1879 }
1880 }
1881 pVCpu->iem.s.cbOpcode += cbToTryRead;
1882 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1883
1884 return VINF_SUCCESS;
1885}
1886
1887#endif /* !IEM_WITH_CODE_TLB */
1888#ifndef IEM_WITH_SETJMP
1889
1890/**
1891 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1892 *
1893 * @returns Strict VBox status code.
1894 * @param pVCpu The cross context virtual CPU structure of the
1895 * calling thread.
1896 * @param pb Where to return the opcode byte.
1897 */
1898DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1899{
1900 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1901 if (rcStrict == VINF_SUCCESS)
1902 {
1903 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1904 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1905 pVCpu->iem.s.offOpcode = offOpcode + 1;
1906 }
1907 else
1908 *pb = 0;
1909 return rcStrict;
1910}
1911
1912
1913/**
1914 * Fetches the next opcode byte.
1915 *
1916 * @returns Strict VBox status code.
1917 * @param pVCpu The cross context virtual CPU structure of the
1918 * calling thread.
1919 * @param pu8 Where to return the opcode byte.
1920 */
1921DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1922{
1923 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1924 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1925 {
1926 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1927 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1928 return VINF_SUCCESS;
1929 }
1930 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1931}
1932
1933#else /* IEM_WITH_SETJMP */
1934
1935/**
1936 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1937 *
1938 * @returns The opcode byte.
1939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1940 */
1941DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1942{
1943# ifdef IEM_WITH_CODE_TLB
1944 uint8_t u8;
1945 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1946 return u8;
1947# else
1948 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1949 if (rcStrict == VINF_SUCCESS)
1950 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1951 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1952# endif
1953}
1954
1955
1956/**
1957 * Fetches the next opcode byte, longjmp on error.
1958 *
1959 * @returns The opcode byte.
1960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1961 */
1962DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1963{
1964# ifdef IEM_WITH_CODE_TLB
1965 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1966 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1967 if (RT_LIKELY( pbBuf != NULL
1968 && offBuf < pVCpu->iem.s.cbInstrBuf))
1969 {
1970 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1971 return pbBuf[offBuf];
1972 }
1973# else
1974 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
1975 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1976 {
1977 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1978 return pVCpu->iem.s.abOpcode[offOpcode];
1979 }
1980# endif
1981 return iemOpcodeGetNextU8SlowJmp(pVCpu);
1982}
1983
1984#endif /* IEM_WITH_SETJMP */
1985
1986/**
1987 * Fetches the next opcode byte, returns automatically on failure.
1988 *
1989 * @param a_pu8 Where to return the opcode byte.
1990 * @remark Implicitly references pVCpu.
1991 */
1992#ifndef IEM_WITH_SETJMP
1993# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1994 do \
1995 { \
1996 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
1997 if (rcStrict2 == VINF_SUCCESS) \
1998 { /* likely */ } \
1999 else \
2000 return rcStrict2; \
2001 } while (0)
2002#else
2003# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2004#endif /* IEM_WITH_SETJMP */
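
/*
 * Illustrative usage sketch (not part of the build): this is how a decoder
 * worker is expected to consume the IEM_OPCODE_GET_NEXT_XXX macros.  The
 * function name iemOp_ExampleDecode is made up for the example; the point is
 * that in non-setjmp builds the macro returns the strict status code on the
 * caller's behalf, while in setjmp builds it just assigns and longjmps on
 * failure, so the decoder body reads the same either way.
 */
#if 0 /* example only */
IEM_STATIC VBOXSTRICTRC iemOp_ExampleDecode(PVMCPU pVCpu)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);       /* may 'return rcStrict2' or longjmp on failure */

    uint8_t u8Imm;
    IEM_OPCODE_GET_NEXT_U8(&u8Imm);     /* the second byte is fetched the same way */

    /* ... decode bRm / u8Imm and dispatch ... */
    NOREF(bRm); NOREF(u8Imm);
    return VINF_SUCCESS;
}
#endif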
2005
2006
2007#ifndef IEM_WITH_SETJMP
2008/**
2009 * Fetches the next signed byte from the opcode stream.
2010 *
2011 * @returns Strict VBox status code.
2012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2013 * @param pi8 Where to return the signed byte.
2014 */
2015DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2016{
2017 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2018}
2019#endif /* !IEM_WITH_SETJMP */
2020
2021
2022/**
2023 * Fetches the next signed byte from the opcode stream, returning automatically
2024 * on failure.
2025 *
2026 * @param a_pi8 Where to return the signed byte.
2027 * @remark Implicitly references pVCpu.
2028 */
2029#ifndef IEM_WITH_SETJMP
2030# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2031 do \
2032 { \
2033 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2034 if (rcStrict2 != VINF_SUCCESS) \
2035 return rcStrict2; \
2036 } while (0)
2037#else /* IEM_WITH_SETJMP */
2038# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2039
2040#endif /* IEM_WITH_SETJMP */
2041
2042#ifndef IEM_WITH_SETJMP
2043
2044/**
2045 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2046 *
2047 * @returns Strict VBox status code.
2048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2049 * @param   pu16                Where to return the opcode word.
2050 */
2051DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2052{
2053 uint8_t u8;
2054 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2055 if (rcStrict == VINF_SUCCESS)
2056 *pu16 = (int8_t)u8;
2057 return rcStrict;
2058}
2059
2060
2061/**
2062 * Fetches the next signed byte from the opcode stream, extending it to
2063 * unsigned 16-bit.
2064 *
2065 * @returns Strict VBox status code.
2066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2067 * @param pu16 Where to return the unsigned word.
2068 */
2069DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2070{
2071 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2072 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2073 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2074
2075 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2076 pVCpu->iem.s.offOpcode = offOpcode + 1;
2077 return VINF_SUCCESS;
2078}
2079
2080#endif /* !IEM_WITH_SETJMP */
2081
2082/**
2083 * Fetches the next signed byte from the opcode stream, sign-extending it to
2084 * a word and returning automatically on failure.
2085 *
2086 * @param a_pu16 Where to return the word.
2087 * @remark Implicitly references pVCpu.
2088 */
2089#ifndef IEM_WITH_SETJMP
2090# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2091 do \
2092 { \
2093 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2094 if (rcStrict2 != VINF_SUCCESS) \
2095 return rcStrict2; \
2096 } while (0)
2097#else
2098# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2099#endif
2100
2101#ifndef IEM_WITH_SETJMP
2102
2103/**
2104 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2105 *
2106 * @returns Strict VBox status code.
2107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2108 * @param pu32 Where to return the opcode dword.
2109 */
2110DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2111{
2112 uint8_t u8;
2113 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2114 if (rcStrict == VINF_SUCCESS)
2115 *pu32 = (int8_t)u8;
2116 return rcStrict;
2117}
2118
2119
2120/**
2121 * Fetches the next signed byte from the opcode stream, extending it to
2122 * unsigned 32-bit.
2123 *
2124 * @returns Strict VBox status code.
2125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2126 * @param pu32 Where to return the unsigned dword.
2127 */
2128DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2129{
2130 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2131 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2132 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2133
2134 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2135 pVCpu->iem.s.offOpcode = offOpcode + 1;
2136 return VINF_SUCCESS;
2137}
2138
2139#endif /* !IEM_WITH_SETJMP */
2140
2141/**
2142 * Fetches the next signed byte from the opcode stream, sign-extending it to
2143 * a double word and returning automatically on failure.
2144 *
2145 * @param   a_pu32              Where to return the double word.
2146 * @remark Implicitly references pVCpu.
2147 */
2148#ifndef IEM_WITH_SETJMP
2149# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2150 do \
2151 { \
2152 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2153 if (rcStrict2 != VINF_SUCCESS) \
2154 return rcStrict2; \
2155 } while (0)
2156#else
2157# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2158#endif
2159
2160#ifndef IEM_WITH_SETJMP
2161
2162/**
2163 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2164 *
2165 * @returns Strict VBox status code.
2166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2167 * @param pu64 Where to return the opcode qword.
2168 */
2169DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2170{
2171 uint8_t u8;
2172 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2173 if (rcStrict == VINF_SUCCESS)
2174 *pu64 = (int8_t)u8;
2175 return rcStrict;
2176}
2177
2178
2179/**
2180 * Fetches the next signed byte from the opcode stream, extending it to
2181 * unsigned 64-bit.
2182 *
2183 * @returns Strict VBox status code.
2184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2185 * @param pu64 Where to return the unsigned qword.
2186 */
2187DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2188{
2189 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2190 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2191 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2192
2193 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2194 pVCpu->iem.s.offOpcode = offOpcode + 1;
2195 return VINF_SUCCESS;
2196}
2197
2198#endif /* !IEM_WITH_SETJMP */
2199
2200
2201/**
2202 * Fetches the next signed byte from the opcode stream, sign-extending it to
2203 * a quad word and returning automatically on failure.
2204 *
2205 * @param   a_pu64              Where to return the quad word.
2206 * @remark Implicitly references pVCpu.
2207 */
2208#ifndef IEM_WITH_SETJMP
2209# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2210 do \
2211 { \
2212 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2213 if (rcStrict2 != VINF_SUCCESS) \
2214 return rcStrict2; \
2215 } while (0)
2216#else
2217# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2218#endif
2219
2220
2221#ifndef IEM_WITH_SETJMP
2222
2223/**
2224 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2225 *
2226 * @returns Strict VBox status code.
2227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2228 * @param pu16 Where to return the opcode word.
2229 */
2230DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2231{
2232 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2233 if (rcStrict == VINF_SUCCESS)
2234 {
2235 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2236# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2237 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2238# else
2239 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2240# endif
2241 pVCpu->iem.s.offOpcode = offOpcode + 2;
2242 }
2243 else
2244 *pu16 = 0;
2245 return rcStrict;
2246}
2247
2248
2249/**
2250 * Fetches the next opcode word.
2251 *
2252 * @returns Strict VBox status code.
2253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2254 * @param pu16 Where to return the opcode word.
2255 */
2256DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2257{
2258 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2259 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2260 {
2261 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2262# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2263 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2264# else
2265 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2266# endif
2267 return VINF_SUCCESS;
2268 }
2269 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2270}
2271
2272#else /* IEM_WITH_SETJMP */
2273
2274/**
2275 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2276 *
2277 * @returns The opcode word.
2278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2279 */
2280DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2281{
2282# ifdef IEM_WITH_CODE_TLB
2283 uint16_t u16;
2284 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2285 return u16;
2286# else
2287 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2288 if (rcStrict == VINF_SUCCESS)
2289 {
2290 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2291 pVCpu->iem.s.offOpcode += 2;
2292# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2293 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2294# else
2295 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2296# endif
2297 }
2298 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2299# endif
2300}
2301
2302
2303/**
2304 * Fetches the next opcode word, longjmp on error.
2305 *
2306 * @returns The opcode word.
2307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2308 */
2309DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2310{
2311# ifdef IEM_WITH_CODE_TLB
2312 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2313 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2314 if (RT_LIKELY( pbBuf != NULL
2315 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2316 {
2317 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2318# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2319 return *(uint16_t const *)&pbBuf[offBuf];
2320# else
2321 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2322# endif
2323 }
2324# else
2325 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2326 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2327 {
2328 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2329# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2330 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2331# else
2332 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2333# endif
2334 }
2335# endif
2336 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2337}
2338
2339#endif /* IEM_WITH_SETJMP */
2340
2341
2342/**
2343 * Fetches the next opcode word, returns automatically on failure.
2344 *
2345 * @param a_pu16 Where to return the opcode word.
2346 * @remark Implicitly references pVCpu.
2347 */
2348#ifndef IEM_WITH_SETJMP
2349# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2350 do \
2351 { \
2352 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2353 if (rcStrict2 != VINF_SUCCESS) \
2354 return rcStrict2; \
2355 } while (0)
2356#else
2357# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2358#endif
2359
2360#ifndef IEM_WITH_SETJMP
2361
2362/**
2363 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2364 *
2365 * @returns Strict VBox status code.
2366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2367 * @param pu32 Where to return the opcode double word.
2368 */
2369DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2370{
2371 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2372 if (rcStrict == VINF_SUCCESS)
2373 {
2374 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2375 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2376 pVCpu->iem.s.offOpcode = offOpcode + 2;
2377 }
2378 else
2379 *pu32 = 0;
2380 return rcStrict;
2381}
2382
2383
2384/**
2385 * Fetches the next opcode word, zero extending it to a double word.
2386 *
2387 * @returns Strict VBox status code.
2388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2389 * @param pu32 Where to return the opcode double word.
2390 */
2391DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2392{
2393 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2394 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2395 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2396
2397 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2398 pVCpu->iem.s.offOpcode = offOpcode + 2;
2399 return VINF_SUCCESS;
2400}
2401
2402#endif /* !IEM_WITH_SETJMP */
2403
2404
2405/**
2406 * Fetches the next opcode word and zero extends it to a double word, returns
2407 * automatically on failure.
2408 *
2409 * @param a_pu32 Where to return the opcode double word.
2410 * @remark Implicitly references pVCpu.
2411 */
2412#ifndef IEM_WITH_SETJMP
2413# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2414 do \
2415 { \
2416 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2417 if (rcStrict2 != VINF_SUCCESS) \
2418 return rcStrict2; \
2419 } while (0)
2420#else
2421# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2422#endif
2423
2424#ifndef IEM_WITH_SETJMP
2425
2426/**
2427 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2428 *
2429 * @returns Strict VBox status code.
2430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2431 * @param pu64 Where to return the opcode quad word.
2432 */
2433DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2434{
2435 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2436 if (rcStrict == VINF_SUCCESS)
2437 {
2438 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2439 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2440 pVCpu->iem.s.offOpcode = offOpcode + 2;
2441 }
2442 else
2443 *pu64 = 0;
2444 return rcStrict;
2445}
2446
2447
2448/**
2449 * Fetches the next opcode word, zero extending it to a quad word.
2450 *
2451 * @returns Strict VBox status code.
2452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2453 * @param pu64 Where to return the opcode quad word.
2454 */
2455DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2456{
2457 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2458 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2459 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2460
2461 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2462 pVCpu->iem.s.offOpcode = offOpcode + 2;
2463 return VINF_SUCCESS;
2464}
2465
2466#endif /* !IEM_WITH_SETJMP */
2467
2468/**
2469 * Fetches the next opcode word and zero extends it to a quad word, returns
2470 * automatically on failure.
2471 *
2472 * @param a_pu64 Where to return the opcode quad word.
2473 * @remark Implicitly references pVCpu.
2474 */
2475#ifndef IEM_WITH_SETJMP
2476# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2477 do \
2478 { \
2479 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2480 if (rcStrict2 != VINF_SUCCESS) \
2481 return rcStrict2; \
2482 } while (0)
2483#else
2484# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2485#endif
2486
2487
2488#ifndef IEM_WITH_SETJMP
2489/**
2490 * Fetches the next signed word from the opcode stream.
2491 *
2492 * @returns Strict VBox status code.
2493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2494 * @param pi16 Where to return the signed word.
2495 */
2496DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2497{
2498 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2499}
2500#endif /* !IEM_WITH_SETJMP */
2501
2502
2503/**
2504 * Fetches the next signed word from the opcode stream, returning automatically
2505 * on failure.
2506 *
2507 * @param a_pi16 Where to return the signed word.
2508 * @remark Implicitly references pVCpu.
2509 */
2510#ifndef IEM_WITH_SETJMP
2511# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2512 do \
2513 { \
2514 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2515 if (rcStrict2 != VINF_SUCCESS) \
2516 return rcStrict2; \
2517 } while (0)
2518#else
2519# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2520#endif
2521
2522#ifndef IEM_WITH_SETJMP
2523
2524/**
2525 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2526 *
2527 * @returns Strict VBox status code.
2528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2529 * @param pu32 Where to return the opcode dword.
2530 */
2531DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2532{
2533 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2534 if (rcStrict == VINF_SUCCESS)
2535 {
2536 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2537# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2538 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2539# else
2540 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2541 pVCpu->iem.s.abOpcode[offOpcode + 1],
2542 pVCpu->iem.s.abOpcode[offOpcode + 2],
2543 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2544# endif
2545 pVCpu->iem.s.offOpcode = offOpcode + 4;
2546 }
2547 else
2548 *pu32 = 0;
2549 return rcStrict;
2550}
2551
2552
2553/**
2554 * Fetches the next opcode dword.
2555 *
2556 * @returns Strict VBox status code.
2557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2558 * @param pu32 Where to return the opcode double word.
2559 */
2560DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2561{
2562 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2563 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2564 {
2565 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2566# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2567 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2568# else
2569 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2570 pVCpu->iem.s.abOpcode[offOpcode + 1],
2571 pVCpu->iem.s.abOpcode[offOpcode + 2],
2572 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2573# endif
2574 return VINF_SUCCESS;
2575 }
2576 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2577}
2578
2579#else  /* IEM_WITH_SETJMP */
2580
2581/**
2582 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2583 *
2584 * @returns The opcode dword.
2585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2586 */
2587DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2588{
2589# ifdef IEM_WITH_CODE_TLB
2590 uint32_t u32;
2591 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2592 return u32;
2593# else
2594 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2595 if (rcStrict == VINF_SUCCESS)
2596 {
2597 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2598 pVCpu->iem.s.offOpcode = offOpcode + 4;
2599# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2600 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2601# else
2602 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2603 pVCpu->iem.s.abOpcode[offOpcode + 1],
2604 pVCpu->iem.s.abOpcode[offOpcode + 2],
2605 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2606# endif
2607 }
2608 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2609# endif
2610}
2611
2612
2613/**
2614 * Fetches the next opcode dword, longjmp on error.
2615 *
2616 * @returns The opcode dword.
2617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2618 */
2619DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2620{
2621# ifdef IEM_WITH_CODE_TLB
2622 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2623 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2624 if (RT_LIKELY( pbBuf != NULL
2625 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2626 {
2627 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2628# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2629 return *(uint32_t const *)&pbBuf[offBuf];
2630# else
2631 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2632 pbBuf[offBuf + 1],
2633 pbBuf[offBuf + 2],
2634 pbBuf[offBuf + 3]);
2635# endif
2636 }
2637# else
2638 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2639 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2640 {
2641 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2642# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2643 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2644# else
2645 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2646 pVCpu->iem.s.abOpcode[offOpcode + 1],
2647 pVCpu->iem.s.abOpcode[offOpcode + 2],
2648 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2649# endif
2650 }
2651# endif
2652 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2653}
2654
2655#endif /* IEM_WITH_SETJMP */
2656
2657
2658/**
2659 * Fetches the next opcode dword, returns automatically on failure.
2660 *
2661 * @param a_pu32 Where to return the opcode dword.
2662 * @remark Implicitly references pVCpu.
2663 */
2664#ifndef IEM_WITH_SETJMP
2665# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2666 do \
2667 { \
2668 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2669 if (rcStrict2 != VINF_SUCCESS) \
2670 return rcStrict2; \
2671 } while (0)
2672#else
2673# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2674#endif
2675
2676#ifndef IEM_WITH_SETJMP
2677
2678/**
2679 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2680 *
2681 * @returns Strict VBox status code.
2682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2683 * @param   pu64                Where to return the opcode dword, zero extended to a qword.
2684 */
2685DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2686{
2687 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2688 if (rcStrict == VINF_SUCCESS)
2689 {
2690 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2691 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2692 pVCpu->iem.s.abOpcode[offOpcode + 1],
2693 pVCpu->iem.s.abOpcode[offOpcode + 2],
2694 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2695 pVCpu->iem.s.offOpcode = offOpcode + 4;
2696 }
2697 else
2698 *pu64 = 0;
2699 return rcStrict;
2700}
2701
2702
2703/**
2704 * Fetches the next opcode dword, zero extending it to a quad word.
2705 *
2706 * @returns Strict VBox status code.
2707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2708 * @param pu64 Where to return the opcode quad word.
2709 */
2710DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2711{
2712 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2713 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2714 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2715
2716 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2717 pVCpu->iem.s.abOpcode[offOpcode + 1],
2718 pVCpu->iem.s.abOpcode[offOpcode + 2],
2719 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2720 pVCpu->iem.s.offOpcode = offOpcode + 4;
2721 return VINF_SUCCESS;
2722}
2723
2724#endif /* !IEM_WITH_SETJMP */
2725
2726
2727/**
2728 * Fetches the next opcode dword and zero extends it to a quad word, returns
2729 * automatically on failure.
2730 *
2731 * @param a_pu64 Where to return the opcode quad word.
2732 * @remark Implicitly references pVCpu.
2733 */
2734#ifndef IEM_WITH_SETJMP
2735# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2736 do \
2737 { \
2738 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2739 if (rcStrict2 != VINF_SUCCESS) \
2740 return rcStrict2; \
2741 } while (0)
2742#else
2743# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2744#endif
2745
2746
2747#ifndef IEM_WITH_SETJMP
2748/**
2749 * Fetches the next signed double word from the opcode stream.
2750 *
2751 * @returns Strict VBox status code.
2752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2753 * @param pi32 Where to return the signed double word.
2754 */
2755DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2756{
2757 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2758}
2759#endif
2760
2761/**
2762 * Fetches the next signed double word from the opcode stream, returning
2763 * automatically on failure.
2764 *
2765 * @param a_pi32 Where to return the signed double word.
2766 * @remark Implicitly references pVCpu.
2767 */
2768#ifndef IEM_WITH_SETJMP
2769# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2770 do \
2771 { \
2772 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2773 if (rcStrict2 != VINF_SUCCESS) \
2774 return rcStrict2; \
2775 } while (0)
2776#else
2777# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2778#endif
2779
2780#ifndef IEM_WITH_SETJMP
2781
2782/**
2783 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2784 *
2785 * @returns Strict VBox status code.
2786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2787 * @param pu64 Where to return the opcode qword.
2788 */
2789DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2790{
2791 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2792 if (rcStrict == VINF_SUCCESS)
2793 {
2794 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2795 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2796 pVCpu->iem.s.abOpcode[offOpcode + 1],
2797 pVCpu->iem.s.abOpcode[offOpcode + 2],
2798 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2799 pVCpu->iem.s.offOpcode = offOpcode + 4;
2800 }
2801 else
2802 *pu64 = 0;
2803 return rcStrict;
2804}
2805
2806
2807/**
2808 * Fetches the next opcode dword, sign extending it into a quad word.
2809 *
2810 * @returns Strict VBox status code.
2811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2812 * @param pu64 Where to return the opcode quad word.
2813 */
2814DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2815{
2816 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2817 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2818 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2819
2820 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2821 pVCpu->iem.s.abOpcode[offOpcode + 1],
2822 pVCpu->iem.s.abOpcode[offOpcode + 2],
2823 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2824 *pu64 = i32;
2825 pVCpu->iem.s.offOpcode = offOpcode + 4;
2826 return VINF_SUCCESS;
2827}
2828
2829#endif /* !IEM_WITH_SETJMP */
2830
2831
2832/**
2833 * Fetches the next opcode double word and sign extends it to a quad word,
2834 * returns automatically on failure.
2835 *
2836 * @param a_pu64 Where to return the opcode quad word.
2837 * @remark Implicitly references pVCpu.
2838 */
2839#ifndef IEM_WITH_SETJMP
2840# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2841 do \
2842 { \
2843 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2844 if (rcStrict2 != VINF_SUCCESS) \
2845 return rcStrict2; \
2846 } while (0)
2847#else
2848# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2849#endif
2850
2851#ifndef IEM_WITH_SETJMP
2852
2853/**
2854 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2855 *
2856 * @returns Strict VBox status code.
2857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2858 * @param pu64 Where to return the opcode qword.
2859 */
2860DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2861{
2862 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2863 if (rcStrict == VINF_SUCCESS)
2864 {
2865 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2866# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2867 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2868# else
2869 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2870 pVCpu->iem.s.abOpcode[offOpcode + 1],
2871 pVCpu->iem.s.abOpcode[offOpcode + 2],
2872 pVCpu->iem.s.abOpcode[offOpcode + 3],
2873 pVCpu->iem.s.abOpcode[offOpcode + 4],
2874 pVCpu->iem.s.abOpcode[offOpcode + 5],
2875 pVCpu->iem.s.abOpcode[offOpcode + 6],
2876 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2877# endif
2878 pVCpu->iem.s.offOpcode = offOpcode + 8;
2879 }
2880 else
2881 *pu64 = 0;
2882 return rcStrict;
2883}
2884
2885
2886/**
2887 * Fetches the next opcode qword.
2888 *
2889 * @returns Strict VBox status code.
2890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2891 * @param pu64 Where to return the opcode qword.
2892 */
2893DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2894{
2895 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2896 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2897 {
2898# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2899 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2900# else
2901 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2902 pVCpu->iem.s.abOpcode[offOpcode + 1],
2903 pVCpu->iem.s.abOpcode[offOpcode + 2],
2904 pVCpu->iem.s.abOpcode[offOpcode + 3],
2905 pVCpu->iem.s.abOpcode[offOpcode + 4],
2906 pVCpu->iem.s.abOpcode[offOpcode + 5],
2907 pVCpu->iem.s.abOpcode[offOpcode + 6],
2908 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2909# endif
2910 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2911 return VINF_SUCCESS;
2912 }
2913 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2914}
2915
2916#else /* IEM_WITH_SETJMP */
2917
2918/**
2919 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2920 *
2921 * @returns The opcode qword.
2922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2923 */
2924DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2925{
2926# ifdef IEM_WITH_CODE_TLB
2927 uint64_t u64;
2928 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2929 return u64;
2930# else
2931 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2932 if (rcStrict == VINF_SUCCESS)
2933 {
2934 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2935 pVCpu->iem.s.offOpcode = offOpcode + 8;
2936# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2937 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2938# else
2939 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2940 pVCpu->iem.s.abOpcode[offOpcode + 1],
2941 pVCpu->iem.s.abOpcode[offOpcode + 2],
2942 pVCpu->iem.s.abOpcode[offOpcode + 3],
2943 pVCpu->iem.s.abOpcode[offOpcode + 4],
2944 pVCpu->iem.s.abOpcode[offOpcode + 5],
2945 pVCpu->iem.s.abOpcode[offOpcode + 6],
2946 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2947# endif
2948 }
2949 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2950# endif
2951}
2952
2953
2954/**
2955 * Fetches the next opcode qword, longjmp on error.
2956 *
2957 * @returns The opcode qword.
2958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2959 */
2960DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2961{
2962# ifdef IEM_WITH_CODE_TLB
2963 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2964 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2965 if (RT_LIKELY( pbBuf != NULL
2966 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2967 {
2968 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2969# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2970 return *(uint64_t const *)&pbBuf[offBuf];
2971# else
2972 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2973 pbBuf[offBuf + 1],
2974 pbBuf[offBuf + 2],
2975 pbBuf[offBuf + 3],
2976 pbBuf[offBuf + 4],
2977 pbBuf[offBuf + 5],
2978 pbBuf[offBuf + 6],
2979 pbBuf[offBuf + 7]);
2980# endif
2981 }
2982# else
2983 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2984 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2985 {
2986 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2987# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2988 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2989# else
2990 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2991 pVCpu->iem.s.abOpcode[offOpcode + 1],
2992 pVCpu->iem.s.abOpcode[offOpcode + 2],
2993 pVCpu->iem.s.abOpcode[offOpcode + 3],
2994 pVCpu->iem.s.abOpcode[offOpcode + 4],
2995 pVCpu->iem.s.abOpcode[offOpcode + 5],
2996 pVCpu->iem.s.abOpcode[offOpcode + 6],
2997 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2998# endif
2999 }
3000# endif
3001 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3002}
3003
3004#endif /* IEM_WITH_SETJMP */
3005
3006/**
3007 * Fetches the next opcode quad word, returns automatically on failure.
3008 *
3009 * @param a_pu64 Where to return the opcode quad word.
3010 * @remark Implicitly references pVCpu.
3011 */
3012#ifndef IEM_WITH_SETJMP
3013# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3014 do \
3015 { \
3016 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3017 if (rcStrict2 != VINF_SUCCESS) \
3018 return rcStrict2; \
3019 } while (0)
3020#else
3021# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3022#endif
3023
3024
3025/** @name Misc Worker Functions.
3026 * @{
3027 */
3028
3029
3030/**
3031 * Validates a new SS segment.
3032 *
3033 * @returns VBox strict status code.
3034 * @param pVCpu The cross context virtual CPU structure of the
3035 * calling thread.
3036 * @param pCtx The CPU context.
3037 * @param   NewSS               The new SS selector.
3038 * @param uCpl The CPL to load the stack for.
3039 * @param pDesc Where to return the descriptor.
3040 */
3041IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3042{
3043 NOREF(pCtx);
3044
3045 /* Null selectors are not allowed (we're not called for dispatching
3046 interrupts with SS=0 in long mode). */
3047 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3048 {
3049        Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
3050 return iemRaiseTaskSwitchFault0(pVCpu);
3051 }
3052
3053 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3054 if ((NewSS & X86_SEL_RPL) != uCpl)
3055 {
3056        Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3057 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3058 }
3059
3060 /*
3061 * Read the descriptor.
3062 */
3063 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3064 if (rcStrict != VINF_SUCCESS)
3065 return rcStrict;
3066
3067 /*
3068 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3069 */
3070 if (!pDesc->Legacy.Gen.u1DescType)
3071 {
3072        Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3073 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3074 }
3075
3076 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3077 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3078 {
3079        Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3080 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3081 }
3082 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3083 {
3084        Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3085 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3086 }
3087
3088 /* Is it there? */
3089 /** @todo testcase: Is this checked before the canonical / limit check below? */
3090 if (!pDesc->Legacy.Gen.u1Present)
3091 {
3092        Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
3093 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3094 }
3095
3096 return VINF_SUCCESS;
3097}
3098
3099
3100/**
3101 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3102 * not.
3103 *
3104 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3105 * @param a_pCtx The CPU context.
3106 */
3107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3108# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3109 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3110 ? (a_pCtx)->eflags.u \
3111 : CPUMRawGetEFlags(a_pVCpu) )
3112#else
3113# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3114 ( (a_pCtx)->eflags.u )
3115#endif
3116
3117/**
3118 * Updates the EFLAGS in the correct manner wrt. PATM.
3119 *
3120 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3121 * @param a_pCtx The CPU context.
3122 * @param a_fEfl The new EFLAGS.
3123 */
3124#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3125# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3126 do { \
3127 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3128 (a_pCtx)->eflags.u = (a_fEfl); \
3129 else \
3130 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3131 } while (0)
3132#else
3133# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3134 do { \
3135 (a_pCtx)->eflags.u = (a_fEfl); \
3136 } while (0)
3137#endif
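
/*
 * Illustrative usage sketch (not part of the build): the two macros above are
 * meant to be used as a get/modify/set pair, so raw-mode (PATM) builds go
 * through CPUMRawGetEFlags/CPUMRawSetEFlags while other builds touch
 * pCtx->eflags.u directly.  The function name and the particular flag tweak
 * below are made up for the example.
 */
#if 0 /* example only */
static void iemExampleClearTfRf(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);   /* fetch the effective EFLAGS */
    fEfl &= ~(X86_EFL_TF | X86_EFL_RF);             /* modify the local copy */
    IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);             /* write it back the PATM-safe way */
}
#endif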
3138
3139
3140/** @} */
3141
3142/** @name Raising Exceptions.
3143 *
3144 * @{
3145 */
3146
3147/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3148 * @{ */
3149/** CPU exception. */
3150#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3151/** External interrupt (from PIC, APIC, whatever). */
3152#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3153/** Software interrupt (int or into, not bound).
3154 * Returns to the following instruction. */
3155#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3156/** Takes an error code. */
3157#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3158/** Takes a CR2. */
3159#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3160/** Generated by the breakpoint instruction. */
3161#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3162/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3163#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3164/** @} */
3165
3166
3167/**
3168 * Loads the specified stack far pointer from the TSS.
3169 *
3170 * @returns VBox strict status code.
3171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3172 * @param pCtx The CPU context.
3173 * @param uCpl The CPL to load the stack for.
3174 * @param pSelSS Where to return the new stack segment.
3175 * @param puEsp Where to return the new stack pointer.
3176 */
3177IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3178 PRTSEL pSelSS, uint32_t *puEsp)
3179{
3180 VBOXSTRICTRC rcStrict;
3181 Assert(uCpl < 4);
3182
3183 switch (pCtx->tr.Attr.n.u4Type)
3184 {
3185 /*
3186 * 16-bit TSS (X86TSS16).
3187 */
3188 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
3189 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3190 {
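            /*
             * A 16-bit TSS keeps the ring stacks as SP:SS word pairs starting at offset 2
             * (sp0 at 2, ss0 at 4), 4 bytes per privilege level.  The single dword read
             * below therefore yields SP in the low word and SS in the high word.
             */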
3191 uint32_t off = uCpl * 4 + 2;
3192 if (off + 4 <= pCtx->tr.u32Limit)
3193 {
3194 /** @todo check actual access pattern here. */
3195 uint32_t u32Tmp = 0; /* gcc maybe... */
3196 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3197 if (rcStrict == VINF_SUCCESS)
3198 {
3199 *puEsp = RT_LOWORD(u32Tmp);
3200 *pSelSS = RT_HIWORD(u32Tmp);
3201 return VINF_SUCCESS;
3202 }
3203 }
3204 else
3205 {
3206 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3207 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3208 }
3209 break;
3210 }
3211
3212 /*
3213 * 32-bit TSS (X86TSS32).
3214 */
3215 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
3216 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3217 {
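            /*
             * A 32-bit TSS keeps the ring stacks as ESP/SS pairs starting at offset 4
             * (esp0 at 4, ss0 at 8), 8 bytes per privilege level.  The 64-bit read below
             * yields ESP in the low dword and the SS selector in bits 32..47.
             */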
3218 uint32_t off = uCpl * 8 + 4;
3219 if (off + 7 <= pCtx->tr.u32Limit)
3220 {
3221/** @todo check actual access pattern here. */
3222 uint64_t u64Tmp;
3223 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3224 if (rcStrict == VINF_SUCCESS)
3225 {
3226 *puEsp = u64Tmp & UINT32_MAX;
3227 *pSelSS = (RTSEL)(u64Tmp >> 32);
3228 return VINF_SUCCESS;
3229 }
3230 }
3231 else
3232 {
3233                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3234 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3235 }
3236 break;
3237 }
3238
3239 default:
3240 AssertFailed();
3241 rcStrict = VERR_IEM_IPE_4;
3242 break;
3243 }
3244
3245 *puEsp = 0; /* make gcc happy */
3246 *pSelSS = 0; /* make gcc happy */
3247 return rcStrict;
3248}
3249
3250
3251/**
3252 * Loads the specified stack pointer from the 64-bit TSS.
3253 *
3254 * @returns VBox strict status code.
3255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3256 * @param pCtx The CPU context.
3257 * @param uCpl The CPL to load the stack for.
3258 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3259 * @param puRsp Where to return the new stack pointer.
3260 */
3261IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3262{
3263 Assert(uCpl < 4);
3264 Assert(uIst < 8);
3265 *puRsp = 0; /* make gcc happy */
3266
3267 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3268
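    /*
     * A non-zero IST index selects the corresponding ISTn slot (1-based); otherwise the
     * RSPn slot for the target CPL is used, following the long mode (64-bit) TSS layout.
     */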
3269 uint32_t off;
3270 if (uIst)
3271 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3272 else
3273 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3274 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3275 {
3276 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3277 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3278 }
3279
3280 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3281}
3282
3283
3284/**
3285 * Adjusts the CPU state according to the exception being raised.
3286 *
3287 * @param pCtx The CPU context.
3288 * @param u8Vector The exception that has been raised.
3289 */
3290DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3291{
3292 switch (u8Vector)
3293 {
3294 case X86_XCPT_DB:
3295 pCtx->dr[7] &= ~X86_DR7_GD;
3296 break;
3297 /** @todo Read the AMD and Intel exception reference... */
3298 }
3299}
3300
3301
3302/**
3303 * Implements exceptions and interrupts for real mode.
3304 *
3305 * @returns VBox strict status code.
3306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3307 * @param pCtx The CPU context.
3308 * @param cbInstr The number of bytes to offset rIP by in the return
3309 * address.
3310 * @param u8Vector The interrupt / exception vector number.
3311 * @param fFlags The flags.
3312 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3313 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3314 */
3315IEM_STATIC VBOXSTRICTRC
3316iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3317 PCPUMCTX pCtx,
3318 uint8_t cbInstr,
3319 uint8_t u8Vector,
3320 uint32_t fFlags,
3321 uint16_t uErr,
3322 uint64_t uCr2)
3323{
3324 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3325 NOREF(uErr); NOREF(uCr2);
3326
3327 /*
3328 * Read the IDT entry.
3329 */
3330 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3331 {
3332 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3333 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3334 }
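    /* Each real-mode IVT entry is a 4-byte far pointer: offset in the low word, segment in the high word. */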
3335 RTFAR16 Idte;
3336 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3337 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3338 return rcStrict;
3339
3340 /*
3341 * Push the stack frame.
3342 */
3343 uint16_t *pu16Frame;
3344 uint64_t uNewRsp;
3345 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3346 if (rcStrict != VINF_SUCCESS)
3347 return rcStrict;
3348
3349 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3350#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3351 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
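    /* CPUs up to and including the 80186 report FLAGS bits 12..15 as set, so mirror that in the pushed image. */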
3352 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3353 fEfl |= UINT16_C(0xf000);
3354#endif
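    /* Build the 3-word frame in memory order: IP at the lowest address, then CS, then FLAGS - the order IRET pops them. */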
3355 pu16Frame[2] = (uint16_t)fEfl;
3356 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3357 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3358 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3359 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3360 return rcStrict;
3361
3362 /*
3363 * Load the vector address into cs:ip and make exception specific state
3364 * adjustments.
3365 */
3366 pCtx->cs.Sel = Idte.sel;
3367 pCtx->cs.ValidSel = Idte.sel;
3368 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3369 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3370 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3371 pCtx->rip = Idte.off;
3372 fEfl &= ~X86_EFL_IF;
3373 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3374
3375 /** @todo do we actually do this in real mode? */
3376 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3377 iemRaiseXcptAdjustState(pCtx, u8Vector);
3378
3379 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3380}
3381
3382
3383/**
3384 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3385 *
3386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3387 * @param pSReg Pointer to the segment register.
3388 */
3389IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3390{
3391 pSReg->Sel = 0;
3392 pSReg->ValidSel = 0;
3393 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3394 {
3395         /* VT-x (Intel 3960x) doesn't change the base and limit; it only clears and sets the following attributes. */
3396 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3397 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3398 }
3399 else
3400 {
3401 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3402 /** @todo check this on AMD-V */
3403 pSReg->u64Base = 0;
3404 pSReg->u32Limit = 0;
3405 }
3406}
3407
3408
3409/**
3410 * Loads a segment selector during a task switch in V8086 mode.
3411 *
3412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3413 * @param pSReg Pointer to the segment register.
3414 * @param uSel The selector value to load.
3415 */
3416IEM_STATIC void iemHlpLoadSelectorInV86Mode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3417{
3418 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3419 pSReg->Sel = uSel;
3420 pSReg->ValidSel = uSel;
3421 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3422 pSReg->u64Base = uSel << 4;
3423 pSReg->u32Limit = 0xffff;
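    /* 0xf3 = present, DPL=3, accessed read/write data segment - the fixed attribute value for V8086 segments. */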
3424 pSReg->Attr.u = 0xf3;
3425}
3426
3427
3428/**
3429 * Loads a NULL data selector into a selector register, both the hidden and
3430 * visible parts, in protected mode.
3431 *
3432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3433 * @param pSReg Pointer to the segment register.
3434 * @param uRpl The RPL.
3435 */
3436IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3437{
3438     /** @todo Testcase: write a testcase checking what happens when loading a NULL
3439 * data selector in protected mode. */
3440 pSReg->Sel = uRpl;
3441 pSReg->ValidSel = uRpl;
3442 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3443 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3444 {
3445 /* VT-x (Intel 3960x) observed doing something like this. */
3446 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3447 pSReg->u32Limit = UINT32_MAX;
3448 pSReg->u64Base = 0;
3449 }
3450 else
3451 {
3452 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3453 pSReg->u32Limit = 0;
3454 pSReg->u64Base = 0;
3455 }
3456}
3457
3458
3459/**
3460 * Loads a segment selector during a task switch in protected mode.
3461 *
3462 * In this task switch scenario, we would throw \#TS exceptions rather than
3463 * \#GPs.
3464 *
3465 * @returns VBox strict status code.
3466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3467 * @param pSReg Pointer to the segment register.
3468 * @param uSel The new selector value.
3469 *
3470 * @remarks This does _not_ handle CS or SS.
3471 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3472 */
3473IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3474{
3475 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3476
3477 /* Null data selector. */
3478 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3479 {
3480 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3481 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3482 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3483 return VINF_SUCCESS;
3484 }
3485
3486 /* Fetch the descriptor. */
3487 IEMSELDESC Desc;
3488 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3489 if (rcStrict != VINF_SUCCESS)
3490 {
3491 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3492 VBOXSTRICTRC_VAL(rcStrict)));
3493 return rcStrict;
3494 }
3495
3496 /* Must be a data segment or readable code segment. */
3497 if ( !Desc.Legacy.Gen.u1DescType
3498 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3499 {
3500 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3501 Desc.Legacy.Gen.u4Type));
3502 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3503 }
3504
3505 /* Check privileges for data segments and non-conforming code segments. */
3506 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3507 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3508 {
3509 /* The RPL and the new CPL must be less than or equal to the DPL. */
3510 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3511 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3512 {
3513 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3514 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3515 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3516 }
3517 }
3518
3519 /* Is it there? */
3520 if (!Desc.Legacy.Gen.u1Present)
3521 {
3522 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3523 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3524 }
3525
3526 /* The base and limit. */
3527 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3528 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3529
3530 /*
3531 * Ok, everything checked out fine. Now set the accessed bit before
3532 * committing the result into the registers.
3533 */
3534 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3535 {
3536 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3537 if (rcStrict != VINF_SUCCESS)
3538 return rcStrict;
3539 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3540 }
3541
3542 /* Commit */
3543 pSReg->Sel = uSel;
3544 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3545 pSReg->u32Limit = cbLimit;
3546 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3547 pSReg->ValidSel = uSel;
3548 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3549 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3550 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3551
3552 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3553 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3554 return VINF_SUCCESS;
3555}
3556
3557
3558/**
3559 * Performs a task switch.
3560 *
3561 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3562 * caller is responsible for performing the necessary checks (like DPL, TSS
3563 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3564 * reference for JMP, CALL, IRET.
3565 *
3566 * If the task switch is due to a software interrupt or hardware exception,
3567 * the caller is responsible for validating the TSS selector and descriptor. See
3568 * Intel Instruction reference for INT n.
3569 *
3570 * @returns VBox strict status code.
3571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3572 * @param pCtx The CPU context.
3573 * @param enmTaskSwitch What caused this task switch.
3574 * @param uNextEip The EIP effective after the task switch.
3575 * @param fFlags The flags.
3576 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3577 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3578 * @param SelTSS The TSS selector of the new task.
3579 * @param pNewDescTSS Pointer to the new TSS descriptor.
3580 */
3581IEM_STATIC VBOXSTRICTRC
3582iemTaskSwitch(PVMCPU pVCpu,
3583 PCPUMCTX pCtx,
3584 IEMTASKSWITCH enmTaskSwitch,
3585 uint32_t uNextEip,
3586 uint32_t fFlags,
3587 uint16_t uErr,
3588 uint64_t uCr2,
3589 RTSEL SelTSS,
3590 PIEMSELDESC pNewDescTSS)
3591{
3592 Assert(!IEM_IS_REAL_MODE(pVCpu));
3593 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3594
3595 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3596 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3597 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3598 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3599 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3600
3601 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3602 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3603
3604 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3605 fIsNewTSS386, pCtx->eip, uNextEip));
3606
3607 /* Update CR2 in case it's a page-fault. */
3608 /** @todo This should probably be done much earlier in IEM/PGM. See
3609 * @bugref{5653#c49}. */
3610 if (fFlags & IEM_XCPT_FLAGS_CR2)
3611 pCtx->cr2 = uCr2;
3612
3613 /*
3614 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3615 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3616 */
3617 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3618 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3619 if (uNewTSSLimit < uNewTSSLimitMin)
3620 {
3621 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3622 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3623 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3624 }
3625
3626 /*
3627      * Check the current TSS limit. The last write to the current TSS during the
3628      * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3629 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3630 *
3631      * The AMD docs don't mention any limit checks for LTR, which suggests you can
3632 * end up with smaller than "legal" TSS limits.
3633 */
3634 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3635 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3636 if (uCurTSSLimit < uCurTSSLimitMin)
3637 {
3638 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3639 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3640 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3641 }
3642
3643 /*
3644 * Verify that the new TSS can be accessed and map it. Map only the required contents
3645 * and not the entire TSS.
3646 */
3647 void *pvNewTSS;
3648 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3649 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3650 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3651 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3652 * not perform correct translation if this happens. See Intel spec. 7.2.1
3653 * "Task-State Segment" */
3654 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3655 if (rcStrict != VINF_SUCCESS)
3656 {
3657 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3658 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3659 return rcStrict;
3660 }
3661
3662 /*
3663 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3664 */
3665 uint32_t u32EFlags = pCtx->eflags.u32;
3666 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3667 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3668 {
3669 PX86DESC pDescCurTSS;
3670 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3671 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3672 if (rcStrict != VINF_SUCCESS)
3673 {
3674             Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3675 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3676 return rcStrict;
3677 }
3678
3679 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3680 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3681 if (rcStrict != VINF_SUCCESS)
3682 {
3683             Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3684 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3685 return rcStrict;
3686 }
3687
3688 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3689 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3690 {
3691 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3692 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3693 u32EFlags &= ~X86_EFL_NT;
3694 }
3695 }
3696
3697 /*
3698 * Save the CPU state into the current TSS.
3699 */
3700 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3701 if (GCPtrNewTSS == GCPtrCurTSS)
3702 {
3703 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3704 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3705 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3706 }
3707 if (fIsNewTSS386)
3708 {
3709 /*
3710 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3711 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3712 */
3713 void *pvCurTSS32;
3714 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3715 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3716 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3717 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3718 if (rcStrict != VINF_SUCCESS)
3719 {
3720 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3721 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3722 return rcStrict;
3723 }
3724
3725         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3726 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3727 pCurTSS32->eip = uNextEip;
3728 pCurTSS32->eflags = u32EFlags;
3729 pCurTSS32->eax = pCtx->eax;
3730 pCurTSS32->ecx = pCtx->ecx;
3731 pCurTSS32->edx = pCtx->edx;
3732 pCurTSS32->ebx = pCtx->ebx;
3733 pCurTSS32->esp = pCtx->esp;
3734 pCurTSS32->ebp = pCtx->ebp;
3735 pCurTSS32->esi = pCtx->esi;
3736 pCurTSS32->edi = pCtx->edi;
3737 pCurTSS32->es = pCtx->es.Sel;
3738 pCurTSS32->cs = pCtx->cs.Sel;
3739 pCurTSS32->ss = pCtx->ss.Sel;
3740 pCurTSS32->ds = pCtx->ds.Sel;
3741 pCurTSS32->fs = pCtx->fs.Sel;
3742 pCurTSS32->gs = pCtx->gs.Sel;
3743
3744 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3745 if (rcStrict != VINF_SUCCESS)
3746 {
3747 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3748 VBOXSTRICTRC_VAL(rcStrict)));
3749 return rcStrict;
3750 }
3751 }
3752 else
3753 {
3754 /*
3755 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3756 */
3757 void *pvCurTSS16;
3758 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3759 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3760 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3761 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3762 if (rcStrict != VINF_SUCCESS)
3763 {
3764 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3765 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3766 return rcStrict;
3767 }
3768
3769         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3770 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3771 pCurTSS16->ip = uNextEip;
3772 pCurTSS16->flags = u32EFlags;
3773 pCurTSS16->ax = pCtx->ax;
3774 pCurTSS16->cx = pCtx->cx;
3775 pCurTSS16->dx = pCtx->dx;
3776 pCurTSS16->bx = pCtx->bx;
3777 pCurTSS16->sp = pCtx->sp;
3778 pCurTSS16->bp = pCtx->bp;
3779 pCurTSS16->si = pCtx->si;
3780 pCurTSS16->di = pCtx->di;
3781 pCurTSS16->es = pCtx->es.Sel;
3782 pCurTSS16->cs = pCtx->cs.Sel;
3783 pCurTSS16->ss = pCtx->ss.Sel;
3784 pCurTSS16->ds = pCtx->ds.Sel;
3785
3786 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3787 if (rcStrict != VINF_SUCCESS)
3788 {
3789 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3790 VBOXSTRICTRC_VAL(rcStrict)));
3791 return rcStrict;
3792 }
3793 }
3794
3795 /*
3796 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3797 */
3798 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3799 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3800 {
3801 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3802 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3803 pNewTSS->selPrev = pCtx->tr.Sel;
3804 }
3805
3806 /*
3807 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3808 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3809 */
3810 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3811 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3812 bool fNewDebugTrap;
3813 if (fIsNewTSS386)
3814 {
3815 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3816 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3817 uNewEip = pNewTSS32->eip;
3818 uNewEflags = pNewTSS32->eflags;
3819 uNewEax = pNewTSS32->eax;
3820 uNewEcx = pNewTSS32->ecx;
3821 uNewEdx = pNewTSS32->edx;
3822 uNewEbx = pNewTSS32->ebx;
3823 uNewEsp = pNewTSS32->esp;
3824 uNewEbp = pNewTSS32->ebp;
3825 uNewEsi = pNewTSS32->esi;
3826 uNewEdi = pNewTSS32->edi;
3827 uNewES = pNewTSS32->es;
3828 uNewCS = pNewTSS32->cs;
3829 uNewSS = pNewTSS32->ss;
3830 uNewDS = pNewTSS32->ds;
3831 uNewFS = pNewTSS32->fs;
3832 uNewGS = pNewTSS32->gs;
3833 uNewLdt = pNewTSS32->selLdt;
3834 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3835 }
3836 else
3837 {
3838 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3839 uNewCr3 = 0;
3840 uNewEip = pNewTSS16->ip;
3841 uNewEflags = pNewTSS16->flags;
3842 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3843 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3844 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3845 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3846 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3847 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3848 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3849 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3850 uNewES = pNewTSS16->es;
3851 uNewCS = pNewTSS16->cs;
3852 uNewSS = pNewTSS16->ss;
3853 uNewDS = pNewTSS16->ds;
3854 uNewFS = 0;
3855 uNewGS = 0;
3856 uNewLdt = pNewTSS16->selLdt;
3857 fNewDebugTrap = false;
3858 }
3859
3860 if (GCPtrNewTSS == GCPtrCurTSS)
3861 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3862 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3863
3864 /*
3865 * We're done accessing the new TSS.
3866 */
3867 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3868 if (rcStrict != VINF_SUCCESS)
3869 {
3870 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3871 return rcStrict;
3872 }
3873
3874 /*
3875 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3876 */
3877 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3878 {
3879 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3880 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3881 if (rcStrict != VINF_SUCCESS)
3882 {
3883 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3884 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3885 return rcStrict;
3886 }
3887
3888 /* Check that the descriptor indicates the new TSS is available (not busy). */
3889 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3890 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3891 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3892
3893 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3894 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3895 if (rcStrict != VINF_SUCCESS)
3896 {
3897 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3898 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3899 return rcStrict;
3900 }
3901 }
3902
3903 /*
3904 * From this point on, we're technically in the new task. We will defer exceptions
3905 * until the completion of the task switch but before executing any instructions in the new task.
3906 */
3907 pCtx->tr.Sel = SelTSS;
3908 pCtx->tr.ValidSel = SelTSS;
3909 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3910 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3911 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3912 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3913 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3914
3915 /* Set the busy bit in TR. */
3916 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3917 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3918 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3919 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3920 {
3921 uNewEflags |= X86_EFL_NT;
3922 }
3923
3924 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3925 pCtx->cr0 |= X86_CR0_TS;
3926 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3927
3928 pCtx->eip = uNewEip;
3929 pCtx->eax = uNewEax;
3930 pCtx->ecx = uNewEcx;
3931 pCtx->edx = uNewEdx;
3932 pCtx->ebx = uNewEbx;
3933 pCtx->esp = uNewEsp;
3934 pCtx->ebp = uNewEbp;
3935 pCtx->esi = uNewEsi;
3936 pCtx->edi = uNewEdi;
3937
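    /* Sanitize the eflags image loaded from the new TSS: keep only implemented bits and force the always-one reserved bit (bit 1). */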
3938 uNewEflags &= X86_EFL_LIVE_MASK;
3939 uNewEflags |= X86_EFL_RA1_MASK;
3940 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3941
3942 /*
3943 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3944 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3945 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3946 */
3947 pCtx->es.Sel = uNewES;
3948 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3949
3950 pCtx->cs.Sel = uNewCS;
3951 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3952
3953 pCtx->ss.Sel = uNewSS;
3954 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3955
3956 pCtx->ds.Sel = uNewDS;
3957 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3958
3959 pCtx->fs.Sel = uNewFS;
3960 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3961
3962 pCtx->gs.Sel = uNewGS;
3963 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3964 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3965
3966 pCtx->ldtr.Sel = uNewLdt;
3967 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3968 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3969 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3970
3971 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3972 {
3973 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3974 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
3975 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
3976 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
3977 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
3978 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
3979 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3980 }
3981
3982 /*
3983 * Switch CR3 for the new task.
3984 */
3985 if ( fIsNewTSS386
3986 && (pCtx->cr0 & X86_CR0_PG))
3987 {
3988 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3989 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3990 {
3991 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3992 AssertRCSuccessReturn(rc, rc);
3993 }
3994 else
3995 pCtx->cr3 = uNewCr3;
3996
3997 /* Inform PGM. */
3998 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3999 {
4000 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4001 AssertRCReturn(rc, rc);
4002 /* ignore informational status codes */
4003 }
4004 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4005 }
4006
4007 /*
4008 * Switch LDTR for the new task.
4009 */
4010 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4011 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4012 else
4013 {
4014 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4015
4016 IEMSELDESC DescNewLdt;
4017 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4018 if (rcStrict != VINF_SUCCESS)
4019 {
4020 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4021 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4022 return rcStrict;
4023 }
4024 if ( !DescNewLdt.Legacy.Gen.u1Present
4025 || DescNewLdt.Legacy.Gen.u1DescType
4026 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4027 {
4028 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4029 uNewLdt, DescNewLdt.Legacy.u));
4030 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4031 }
4032
4033 pCtx->ldtr.ValidSel = uNewLdt;
4034 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4035 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4036 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4037 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4038 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4039 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4041 }
4042
4043 IEMSELDESC DescSS;
4044 if (IEM_IS_V86_MODE(pVCpu))
4045 {
4046 pVCpu->iem.s.uCpl = 3;
4047 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->es, uNewES);
4048 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->cs, uNewCS);
4049 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ss, uNewSS);
4050 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ds, uNewDS);
4051 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->fs, uNewFS);
4052 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->gs, uNewGS);
4053
4054 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4055 DescSS.Legacy.u = 0;
4056 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4057 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4058 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4059 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4060 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4061 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4062 DescSS.Legacy.Gen.u2Dpl = 3;
4063 }
4064 else
4065 {
4066 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4067
4068 /*
4069 * Load the stack segment for the new task.
4070 */
4071 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4072 {
4073 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4074 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4075 }
4076
4077 /* Fetch the descriptor. */
4078 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4079 if (rcStrict != VINF_SUCCESS)
4080 {
4081 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4082 VBOXSTRICTRC_VAL(rcStrict)));
4083 return rcStrict;
4084 }
4085
4086 /* SS must be a data segment and writable. */
4087 if ( !DescSS.Legacy.Gen.u1DescType
4088 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4089 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4090 {
4091 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4092 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4093 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4094 }
4095
4096 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4097 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4098 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4099 {
4100 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4101 uNewCpl));
4102 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4103 }
4104
4105 /* Is it there? */
4106 if (!DescSS.Legacy.Gen.u1Present)
4107 {
4108 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4109 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4110 }
4111
4112 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4113 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4114
4115 /* Set the accessed bit before committing the result into SS. */
4116 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4117 {
4118 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4119 if (rcStrict != VINF_SUCCESS)
4120 return rcStrict;
4121 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4122 }
4123
4124 /* Commit SS. */
4125 pCtx->ss.Sel = uNewSS;
4126 pCtx->ss.ValidSel = uNewSS;
4127 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4128 pCtx->ss.u32Limit = cbLimit;
4129 pCtx->ss.u64Base = u64Base;
4130 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4131 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4132
4133 /* CPL has changed, update IEM before loading rest of segments. */
4134 pVCpu->iem.s.uCpl = uNewCpl;
4135
4136 /*
4137 * Load the data segments for the new task.
4138 */
4139 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4140 if (rcStrict != VINF_SUCCESS)
4141 return rcStrict;
4142 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4143 if (rcStrict != VINF_SUCCESS)
4144 return rcStrict;
4145 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4146 if (rcStrict != VINF_SUCCESS)
4147 return rcStrict;
4148 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4149 if (rcStrict != VINF_SUCCESS)
4150 return rcStrict;
4151
4152 /*
4153 * Load the code segment for the new task.
4154 */
4155 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4156 {
4157 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4158 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4159 }
4160
4161 /* Fetch the descriptor. */
4162 IEMSELDESC DescCS;
4163 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4164 if (rcStrict != VINF_SUCCESS)
4165 {
4166 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4167 return rcStrict;
4168 }
4169
4170 /* CS must be a code segment. */
4171 if ( !DescCS.Legacy.Gen.u1DescType
4172 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4173 {
4174 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4175 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4176 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4177 }
4178
4179 /* For conforming CS, DPL must be less than or equal to the RPL. */
4180 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4181 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4182 {
4183             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4184 DescCS.Legacy.Gen.u2Dpl));
4185 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4186 }
4187
4188 /* For non-conforming CS, DPL must match RPL. */
4189 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4190 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4191 {
4192             Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4193 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4194 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4195 }
4196
4197 /* Is it there? */
4198 if (!DescCS.Legacy.Gen.u1Present)
4199 {
4200 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4201 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4202 }
4203
4204 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4205 u64Base = X86DESC_BASE(&DescCS.Legacy);
4206
4207 /* Set the accessed bit before committing the result into CS. */
4208 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4209 {
4210 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4211 if (rcStrict != VINF_SUCCESS)
4212 return rcStrict;
4213 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4214 }
4215
4216 /* Commit CS. */
4217 pCtx->cs.Sel = uNewCS;
4218 pCtx->cs.ValidSel = uNewCS;
4219 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4220 pCtx->cs.u32Limit = cbLimit;
4221 pCtx->cs.u64Base = u64Base;
4222 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4224 }
4225
4226 /** @todo Debug trap. */
4227 if (fIsNewTSS386 && fNewDebugTrap)
4228 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4229
4230 /*
4231 * Construct the error code masks based on what caused this task switch.
4232 * See Intel Instruction reference for INT.
4233 */
4234 uint16_t uExt;
4235 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4236 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4237 {
4238 uExt = 1;
4239 }
4240 else
4241 uExt = 0;
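    /* uExt ends up as bit 0 (EXT) of any error code raised below, marking the event as external to the interrupted program. */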
4242
4243 /*
4244 * Push any error code on to the new stack.
4245 */
4246 if (fFlags & IEM_XCPT_FLAGS_ERR)
4247 {
4248 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4249 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4250 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4251
4252 /* Check that there is sufficient space on the stack. */
4253 /** @todo Factor out segment limit checking for normal/expand down segments
4254 * into a separate function. */
4255 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4256 {
4257 if ( pCtx->esp - 1 > cbLimitSS
4258 || pCtx->esp < cbStackFrame)
4259 {
4260 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4261 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4262 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4263 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4264 }
4265 }
4266 else
4267 {
4268 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4269 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4270 {
4271 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4272 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4273 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4274 }
4275 }
4276
4277
4278 if (fIsNewTSS386)
4279 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4280 else
4281 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4282 if (rcStrict != VINF_SUCCESS)
4283 {
4284 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4285 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4286 return rcStrict;
4287 }
4288 }
4289
4290 /* Check the new EIP against the new CS limit. */
4291 if (pCtx->eip > pCtx->cs.u32Limit)
4292 {
4293         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4294 pCtx->eip, pCtx->cs.u32Limit));
4295 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4296 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4297 }
4298
4299 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4300 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4301}
4302
4303
4304/**
4305 * Implements exceptions and interrupts for protected mode.
4306 *
4307 * @returns VBox strict status code.
4308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4309 * @param pCtx The CPU context.
4310 * @param cbInstr The number of bytes to offset rIP by in the return
4311 * address.
4312 * @param u8Vector The interrupt / exception vector number.
4313 * @param fFlags The flags.
4314 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4315 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4316 */
4317IEM_STATIC VBOXSTRICTRC
4318iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4319 PCPUMCTX pCtx,
4320 uint8_t cbInstr,
4321 uint8_t u8Vector,
4322 uint32_t fFlags,
4323 uint16_t uErr,
4324 uint64_t uCr2)
4325{
4326 /*
4327 * Read the IDT entry.
4328 */
4329 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4330 {
4331 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4332 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4333 }
4334 X86DESC Idte;
4335 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4336 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4337 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4338 return rcStrict;
4339 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4340 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4341 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4342
4343 /*
4344 * Check the descriptor type, DPL and such.
4345 * ASSUMES this is done in the same order as described for call-gate calls.
4346 */
4347 if (Idte.Gate.u1DescType)
4348 {
4349 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4350 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4351 }
4352 bool fTaskGate = false;
4353 uint8_t f32BitGate = true;
4354 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4355 switch (Idte.Gate.u4Type)
4356 {
4357 case X86_SEL_TYPE_SYS_UNDEFINED:
4358 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4359 case X86_SEL_TYPE_SYS_LDT:
4360 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4361 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4362 case X86_SEL_TYPE_SYS_UNDEFINED2:
4363 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4364 case X86_SEL_TYPE_SYS_UNDEFINED3:
4365 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4366 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4367 case X86_SEL_TYPE_SYS_UNDEFINED4:
4368 {
4369 /** @todo check what actually happens when the type is wrong...
4370 * esp. call gates. */
4371 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4372 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4373 }
4374
4375 case X86_SEL_TYPE_SYS_286_INT_GATE:
4376 f32BitGate = false;
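            /* fall thru */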
4377 case X86_SEL_TYPE_SYS_386_INT_GATE:
4378 fEflToClear |= X86_EFL_IF;
4379 break;
4380
4381 case X86_SEL_TYPE_SYS_TASK_GATE:
4382 fTaskGate = true;
4383#ifndef IEM_IMPLEMENTS_TASKSWITCH
4384 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4385#endif
4386 break;
4387
4388 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4389 f32BitGate = false;
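            /* fall thru */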
4390 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4391 break;
4392
4393 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4394 }
4395
4396 /* Check DPL against CPL if applicable. */
4397 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4398 {
4399 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4400 {
4401 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4402 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4403 }
4404 }
4405
4406 /* Is it there? */
4407 if (!Idte.Gate.u1Present)
4408 {
4409 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4410 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4411 }
4412
4413 /* Is it a task-gate? */
4414 if (fTaskGate)
4415 {
4416 /*
4417 * Construct the error code masks based on what caused this task switch.
4418 * See Intel Instruction reference for INT.
4419 */
4420 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4421 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4422 RTSEL SelTSS = Idte.Gate.u16Sel;
4423
4424 /*
4425 * Fetch the TSS descriptor in the GDT.
4426 */
4427 IEMSELDESC DescTSS;
4428 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4429 if (rcStrict != VINF_SUCCESS)
4430 {
4431 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4432 VBOXSTRICTRC_VAL(rcStrict)));
4433 return rcStrict;
4434 }
4435
4436 /* The TSS descriptor must be a system segment and be available (not busy). */
4437 if ( DescTSS.Legacy.Gen.u1DescType
4438 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4439 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4440 {
4441 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4442 u8Vector, SelTSS, DescTSS.Legacy.au64));
4443 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4444 }
4445
4446 /* The TSS must be present. */
4447 if (!DescTSS.Legacy.Gen.u1Present)
4448 {
4449 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4450 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4451 }
4452
4453 /* Do the actual task switch. */
4454 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4455 }
4456
4457 /* A null CS is bad. */
4458 RTSEL NewCS = Idte.Gate.u16Sel;
4459 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4460 {
4461 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4462 return iemRaiseGeneralProtectionFault0(pVCpu);
4463 }
4464
4465 /* Fetch the descriptor for the new CS. */
4466 IEMSELDESC DescCS;
4467 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4468 if (rcStrict != VINF_SUCCESS)
4469 {
4470 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4471 return rcStrict;
4472 }
4473
4474 /* Must be a code segment. */
4475 if (!DescCS.Legacy.Gen.u1DescType)
4476 {
4477 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4478 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4479 }
4480 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4481 {
4482 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4483 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4484 }
4485
4486 /* Don't allow lowering the privilege level. */
4487 /** @todo Does the lowering of privileges apply to software interrupts
4488      *       only? This has a bearing on the more-privileged or
4489 * same-privilege stack behavior further down. A testcase would
4490 * be nice. */
4491 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4492 {
4493 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4494 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4495 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4496 }
4497
4498 /* Make sure the selector is present. */
4499 if (!DescCS.Legacy.Gen.u1Present)
4500 {
4501 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4502 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4503 }
4504
4505 /* Check the new EIP against the new CS limit. */
4506 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4507 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4508 ? Idte.Gate.u16OffsetLow
4509 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4510 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4511 if (uNewEip > cbLimitCS)
4512 {
4513 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4514 u8Vector, uNewEip, cbLimitCS, NewCS));
4515 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4516 }
4517
4518 /* Calc the flag image to push. */
4519 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4520 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4521 fEfl &= ~X86_EFL_RF;
4522 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4523 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4524
4525 /* From V8086 mode only go to CPL 0. */
4526 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4527 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
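    /* (A conforming handler executes at the caller's CPL, a non-conforming one at its own DPL.) */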
4528 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4529 {
4530 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4531 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4532 }
4533
4534 /*
4535 * If the privilege level changes, we need to get a new stack from the TSS.
4536 * This in turns means validating the new SS and ESP...
4537 */
4538 if (uNewCpl != pVCpu->iem.s.uCpl)
4539 {
4540 RTSEL NewSS;
4541 uint32_t uNewEsp;
4542 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4543 if (rcStrict != VINF_SUCCESS)
4544 return rcStrict;
4545
4546 IEMSELDESC DescSS;
4547 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4548 if (rcStrict != VINF_SUCCESS)
4549 return rcStrict;
4550
4551 /* Check that there is sufficient space for the stack frame. */
4552 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
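        /* Frame: [SS ESP] EFLAGS CS EIP [err] = 5 or 6 entries; V86 mode adds ES DS FS GS; entry size doubles for a 32-bit gate. */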
4553 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4554 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4555 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4556
4557 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4558 {
4559 if ( uNewEsp - 1 > cbLimitSS
4560 || uNewEsp < cbStackFrame)
4561 {
4562 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4563 u8Vector, NewSS, uNewEsp, cbStackFrame));
4564 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4565 }
4566 }
4567 else
4568 {
4569 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
4570 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4571 {
4572 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4573 u8Vector, NewSS, uNewEsp, cbStackFrame));
4574 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4575 }
4576 }
4577
4578 /*
4579 * Start making changes.
4580 */
4581
4582 /* Set the new CPL so that stack accesses use it. */
4583 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4584 pVCpu->iem.s.uCpl = uNewCpl;
4585
4586 /* Create the stack frame. */
4587 RTPTRUNION uStackFrame;
4588 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4589 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4590 if (rcStrict != VINF_SUCCESS)
4591 return rcStrict;
4592 void * const pvStackFrame = uStackFrame.pv;
4593 if (f32BitGate)
4594 {
4595 if (fFlags & IEM_XCPT_FLAGS_ERR)
4596 *uStackFrame.pu32++ = uErr;
4597 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4598 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4599 uStackFrame.pu32[2] = fEfl;
4600 uStackFrame.pu32[3] = pCtx->esp;
4601 uStackFrame.pu32[4] = pCtx->ss.Sel;
4602 if (fEfl & X86_EFL_VM)
4603 {
4604 uStackFrame.pu32[1] = pCtx->cs.Sel;
4605 uStackFrame.pu32[5] = pCtx->es.Sel;
4606 uStackFrame.pu32[6] = pCtx->ds.Sel;
4607 uStackFrame.pu32[7] = pCtx->fs.Sel;
4608 uStackFrame.pu32[8] = pCtx->gs.Sel;
4609 }
4610 }
4611 else
4612 {
4613 if (fFlags & IEM_XCPT_FLAGS_ERR)
4614 *uStackFrame.pu16++ = uErr;
4615 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4616 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4617 uStackFrame.pu16[2] = fEfl;
4618 uStackFrame.pu16[3] = pCtx->sp;
4619 uStackFrame.pu16[4] = pCtx->ss.Sel;
4620 if (fEfl & X86_EFL_VM)
4621 {
4622 uStackFrame.pu16[1] = pCtx->cs.Sel;
4623 uStackFrame.pu16[5] = pCtx->es.Sel;
4624 uStackFrame.pu16[6] = pCtx->ds.Sel;
4625 uStackFrame.pu16[7] = pCtx->fs.Sel;
4626 uStackFrame.pu16[8] = pCtx->gs.Sel;
4627 }
4628 }
4629 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4630 if (rcStrict != VINF_SUCCESS)
4631 return rcStrict;
4632
4633 /* Mark the selectors 'accessed' (hope this is the correct time). */
4634         /** @todo testcase: exactly _when_ are the accessed bits set - before or
4635 * after pushing the stack frame? (Write protect the gdt + stack to
4636 * find out.) */
4637 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4638 {
4639 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4640 if (rcStrict != VINF_SUCCESS)
4641 return rcStrict;
4642 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4643 }
4644
4645 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4646 {
4647 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4648 if (rcStrict != VINF_SUCCESS)
4649 return rcStrict;
4650 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4651 }
4652
4653 /*
4654 * Start committing the register changes (joins with the DPL=CPL branch).
4655 */
4656 pCtx->ss.Sel = NewSS;
4657 pCtx->ss.ValidSel = NewSS;
4658 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4659 pCtx->ss.u32Limit = cbLimitSS;
4660 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4661 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4662 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4663 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4664 * SP is loaded).
4665 * Need to check the other combinations too:
4666 * - 16-bit TSS, 32-bit handler
4667 * - 32-bit TSS, 16-bit handler */
4668 if (!pCtx->ss.Attr.n.u1DefBig)
4669 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4670 else
4671 pCtx->rsp = uNewEsp - cbStackFrame;
4672
4673 if (fEfl & X86_EFL_VM)
4674 {
4675 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4676 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4677 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4678 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4679 }
4680 }
4681 /*
4682 * Same privilege, no stack change and smaller stack frame.
4683 */
4684 else
4685 {
4686 uint64_t uNewRsp;
4687 RTPTRUNION uStackFrame;
4688 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4689 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4690 if (rcStrict != VINF_SUCCESS)
4691 return rcStrict;
4692 void * const pvStackFrame = uStackFrame.pv;
4693
4694 if (f32BitGate)
4695 {
4696 if (fFlags & IEM_XCPT_FLAGS_ERR)
4697 *uStackFrame.pu32++ = uErr;
4698 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4699 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4700 uStackFrame.pu32[2] = fEfl;
4701 }
4702 else
4703 {
4704 if (fFlags & IEM_XCPT_FLAGS_ERR)
4705 *uStackFrame.pu16++ = uErr;
4706 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4707 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4708 uStackFrame.pu16[2] = fEfl;
4709 }
4710 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4711 if (rcStrict != VINF_SUCCESS)
4712 return rcStrict;
4713
4714 /* Mark the CS selector as 'accessed'. */
4715 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4716 {
4717 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4718 if (rcStrict != VINF_SUCCESS)
4719 return rcStrict;
4720 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4721 }
4722
4723 /*
4724 * Start committing the register changes (joins with the other branch).
4725 */
4726 pCtx->rsp = uNewRsp;
4727 }
4728
4729 /* ... register committing continues. */
4730 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4731 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4732 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4733 pCtx->cs.u32Limit = cbLimitCS;
4734 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4735 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4736
4737 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4738 fEfl &= ~fEflToClear;
4739 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4740
4741 if (fFlags & IEM_XCPT_FLAGS_CR2)
4742 pCtx->cr2 = uCr2;
4743
4744 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4745 iemRaiseXcptAdjustState(pCtx, u8Vector);
4746
4747 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4748}
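/*
 * Illustrative sketch (kept under #if 0, never compiled): how the cbStackFrame
 * expression in the privilege-change path of the protected-mode dispatcher
 * above works out.
 */
#if 0
    /* 32-bit gate (f32BitGate = 1), error code present (IEM_XCPT_FLAGS_ERR): */
    uint8_t const cbExample = (1 /*fErr*/ ? 12 : 10) << 1 /*f32BitGate*/;   /* = 24 bytes */
    /* Layout, lowest address first: uErr, EIP, CS, EFLAGS, ESP, SS (six dwords).
       Without an error code: 20 bytes.  A 16-bit gate halves these to 12/10.
       Interrupting V8086 code uses base sizes 20/18 instead, adding room for
       the ES, DS, FS and GS pushes (40/36 bytes with a 32-bit gate). */
#endif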
4749
4750
4751/**
4752 * Implements exceptions and interrupts for long mode.
4753 *
4754 * @returns VBox strict status code.
4755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4756 * @param pCtx The CPU context.
4757 * @param cbInstr The number of bytes to offset rIP by in the return
4758 * address.
4759 * @param u8Vector The interrupt / exception vector number.
4760 * @param fFlags The flags.
4761 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4762 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4763 */
4764IEM_STATIC VBOXSTRICTRC
4765iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4766 PCPUMCTX pCtx,
4767 uint8_t cbInstr,
4768 uint8_t u8Vector,
4769 uint32_t fFlags,
4770 uint16_t uErr,
4771 uint64_t uCr2)
4772{
4773 /*
4774 * Read the IDT entry.
4775 */
4776 uint16_t offIdt = (uint16_t)u8Vector << 4;
4777 if (pCtx->idtr.cbIdt < offIdt + 7)
4778 {
4779 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4780 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4781 }
4782 X86DESC64 Idte;
4783 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4784 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4785 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4786 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4787 return rcStrict;
4788 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4789 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4790 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4791
4792 /*
4793 * Check the descriptor type, DPL and such.
4794 * ASSUMES this is done in the same order as described for call-gate calls.
4795 */
4796 if (Idte.Gate.u1DescType)
4797 {
4798 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4799 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4800 }
4801 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4802 switch (Idte.Gate.u4Type)
4803 {
4804 case AMD64_SEL_TYPE_SYS_INT_GATE:
4805 fEflToClear |= X86_EFL_IF;
4806 break;
4807 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4808 break;
4809
4810 default:
4811 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4812 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4813 }
4814
4815 /* Check DPL against CPL if applicable. */
4816 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4817 {
4818 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4819 {
4820 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4821 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4822 }
4823 }
4824
4825 /* Is it there? */
4826 if (!Idte.Gate.u1Present)
4827 {
4828 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4829 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4830 }
4831
4832 /* A null CS is bad. */
4833 RTSEL NewCS = Idte.Gate.u16Sel;
4834 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4835 {
4836 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4837 return iemRaiseGeneralProtectionFault0(pVCpu);
4838 }
4839
4840 /* Fetch the descriptor for the new CS. */
4841 IEMSELDESC DescCS;
4842 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4843 if (rcStrict != VINF_SUCCESS)
4844 {
4845 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4846 return rcStrict;
4847 }
4848
4849 /* Must be a 64-bit code segment. */
4850 if (!DescCS.Long.Gen.u1DescType)
4851 {
4852 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4853 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4854 }
4855 if ( !DescCS.Long.Gen.u1Long
4856 || DescCS.Long.Gen.u1DefBig
4857 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4858 {
4859 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4860 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4861 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4862 }
4863
4864 /* Don't allow lowering the privilege level. For non-conforming CS
4865 selectors, the CS.DPL sets the privilege level the trap/interrupt
4866 handler runs at. For conforming CS selectors, the CPL remains
4867 unchanged, but the CS.DPL must be <= CPL. */
4868 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4869 * when CPU in Ring-0. Result \#GP? */
4870 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4871 {
4872 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4873 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4874 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4875 }
4876
4877
4878 /* Make sure the selector is present. */
4879 if (!DescCS.Legacy.Gen.u1Present)
4880 {
4881 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4882 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4883 }
4884
4885 /* Check that the new RIP is canonical. */
4886 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4887 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4888 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4889 if (!IEM_IS_CANONICAL(uNewRip))
4890 {
4891 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4892 return iemRaiseGeneralProtectionFault0(pVCpu);
4893 }
4894
4895 /*
4896 * If the privilege level changes or if the IST isn't zero, we need to get
4897 * a new stack from the TSS.
4898 */
4899 uint64_t uNewRsp;
4900 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4901 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4902 if ( uNewCpl != pVCpu->iem.s.uCpl
4903 || Idte.Gate.u3IST != 0)
4904 {
4905 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4906 if (rcStrict != VINF_SUCCESS)
4907 return rcStrict;
4908 }
4909 else
4910 uNewRsp = pCtx->rsp;
4911 uNewRsp &= ~(uint64_t)0xf;
4912
4913 /*
4914 * Calc the flag image to push.
4915 */
4916 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4917 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4918 fEfl &= ~X86_EFL_RF;
4919 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4920 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4921
4922 /*
4923 * Start making changes.
4924 */
4925 /* Set the new CPL so that stack accesses use it. */
4926 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4927 pVCpu->iem.s.uCpl = uNewCpl;
4928
4929 /* Create the stack frame. */
4930 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4931 RTPTRUNION uStackFrame;
4932 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4933 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4934 if (rcStrict != VINF_SUCCESS)
4935 return rcStrict;
4936 void * const pvStackFrame = uStackFrame.pv;
4937
4938 if (fFlags & IEM_XCPT_FLAGS_ERR)
4939 *uStackFrame.pu64++ = uErr;
4940 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4941 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4942 uStackFrame.pu64[2] = fEfl;
4943 uStackFrame.pu64[3] = pCtx->rsp;
4944 uStackFrame.pu64[4] = pCtx->ss.Sel;
4945 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4946 if (rcStrict != VINF_SUCCESS)
4947 return rcStrict;
4948
4949 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
4950 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4951 * after pushing the stack frame? (Write protect the gdt + stack to
4952 * find out.) */
4953 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4954 {
4955 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4956 if (rcStrict != VINF_SUCCESS)
4957 return rcStrict;
4958 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4959 }
4960
4961 /*
4962 * Start committing the register changes.
4963 */
4964 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4965 * hidden registers when interrupting 32-bit or 16-bit code! */
4966 if (uNewCpl != uOldCpl)
4967 {
4968 pCtx->ss.Sel = 0 | uNewCpl;
4969 pCtx->ss.ValidSel = 0 | uNewCpl;
4970 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4971 pCtx->ss.u32Limit = UINT32_MAX;
4972 pCtx->ss.u64Base = 0;
4973 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4974 }
4975 pCtx->rsp = uNewRsp - cbStackFrame;
4976 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4977 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4978 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4979 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4980 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4981 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4982 pCtx->rip = uNewRip;
4983
4984 fEfl &= ~fEflToClear;
4985 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4986
4987 if (fFlags & IEM_XCPT_FLAGS_CR2)
4988 pCtx->cr2 = uCr2;
4989
4990 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4991 iemRaiseXcptAdjustState(pCtx, u8Vector);
4992
4993 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4994}
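/*
 * Illustrative sketch (kept under #if 0, never compiled): the 64-bit exception
 * stack frame built above is always five or six qwords on a 16-byte aligned
 * stack.
 */
#if 0
    /* With an error code: */
    uint32_t const cbExample = sizeof(uint64_t) * (5 + 1);   /* = 48 bytes */
    /* Layout, lowest address first: uErr, RIP, CS (with the new CPL), RFLAGS,
       RSP, SS.  The 'uNewRsp &= ~(uint64_t)0xf' above provides the alignment,
       and when IST is zero and the CPL does not change the current RSP is
       reused rather than a stack pointer from the TSS. */
#endif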
4995
4996
4997/**
4998 * Implements exceptions and interrupts.
4999 *
5000 * All exceptions and interrupts go through this function!
5001 *
5002 * @returns VBox strict status code.
5003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5004 * @param cbInstr The number of bytes to offset rIP by in the return
5005 * address.
5006 * @param u8Vector The interrupt / exception vector number.
5007 * @param fFlags The flags.
5008 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5009 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5010 */
5011DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5012iemRaiseXcptOrInt(PVMCPU pVCpu,
5013 uint8_t cbInstr,
5014 uint8_t u8Vector,
5015 uint32_t fFlags,
5016 uint16_t uErr,
5017 uint64_t uCr2)
5018{
5019 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5020#ifdef IN_RING0
5021 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5022 AssertRCReturn(rc, rc);
5023#endif
5024
5025#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5026 /*
5027 * Flush prefetch buffer
5028 */
5029 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5030#endif
5031
5032 /*
5033 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5034 */
5035 if ( pCtx->eflags.Bits.u1VM
5036 && pCtx->eflags.Bits.u2IOPL != 3
5037 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5038 && (pCtx->cr0 & X86_CR0_PE) )
5039 {
5040 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5041 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5042 u8Vector = X86_XCPT_GP;
5043 uErr = 0;
5044 }
5045#ifdef DBGFTRACE_ENABLED
5046 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5047 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5048 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5049#endif
5050
5051 /*
5052 * Do recursion accounting.
5053 */
5054 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5055 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5056 if (pVCpu->iem.s.cXcptRecursions == 0)
5057 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5058 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5059 else
5060 {
5061 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5062 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5063
5064 /** @todo double and triple faults. */
5065 if (pVCpu->iem.s.cXcptRecursions >= 3)
5066 {
5067#ifdef DEBUG_bird
5068 AssertFailed();
5069#endif
5070 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5071 }
5072
5073 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5074 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5075 {
5076 ....
5077 } */
5078 }
5079 pVCpu->iem.s.cXcptRecursions++;
5080 pVCpu->iem.s.uCurXcpt = u8Vector;
5081 pVCpu->iem.s.fCurXcpt = fFlags;
5082
5083 /*
5084 * Extensive logging.
5085 */
5086#if defined(LOG_ENABLED) && defined(IN_RING3)
5087 if (LogIs3Enabled())
5088 {
5089 PVM pVM = pVCpu->CTX_SUFF(pVM);
5090 char szRegs[4096];
5091 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5092 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5093 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5094 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5095 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5096 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5097 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5098 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5099 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5100 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5101 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5102 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5103 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5104 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5105 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5106 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5107 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5108 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5109 " efer=%016VR{efer}\n"
5110 " pat=%016VR{pat}\n"
5111 " sf_mask=%016VR{sf_mask}\n"
5112 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5113 " lstar=%016VR{lstar}\n"
5114 " star=%016VR{star} cstar=%016VR{cstar}\n"
5115 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5116 );
5117
5118 char szInstr[256];
5119 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5120 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5121 szInstr, sizeof(szInstr), NULL);
5122 Log3(("%s%s\n", szRegs, szInstr));
5123 }
5124#endif /* LOG_ENABLED */
5125
5126 /*
5127 * Call the mode specific worker function.
5128 */
5129 VBOXSTRICTRC rcStrict;
5130 if (!(pCtx->cr0 & X86_CR0_PE))
5131 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5132 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5133 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5134 else
5135 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5136
5137 /* Flush the prefetch buffer. */
5138#ifdef IEM_WITH_CODE_TLB
5139 pVCpu->iem.s.pbInstrBuf = NULL;
5140#else
5141 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5142#endif
5143
5144 /*
5145 * Unwind.
5146 */
5147 pVCpu->iem.s.cXcptRecursions--;
5148 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5149 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5150 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5151 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5152 return rcStrict;
5153}
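/*
 * Illustrative sketch (kept under #if 0, never compiled): most code raises
 * exceptions through the thin convenience wrappers that follow rather than by
 * calling iemRaiseXcptOrInt directly.  GCPtrMem is a hypothetical variable.
 */
#if 0
    if (!IEM_IS_CANONICAL(GCPtrMem))
        return iemRaiseGeneralProtectionFault0(pVCpu);
#endif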
5154
5155#ifdef IEM_WITH_SETJMP
5156/**
5157 * See iemRaiseXcptOrInt. Will not return.
5158 */
5159IEM_STATIC DECL_NO_RETURN(void)
5160iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5161 uint8_t cbInstr,
5162 uint8_t u8Vector,
5163 uint32_t fFlags,
5164 uint16_t uErr,
5165 uint64_t uCr2)
5166{
5167 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5168 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5169}
5170#endif
5171
5172
5173/** \#DE - 00. */
5174DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5175{
5176 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5177}
5178
5179
5180/** \#DB - 01.
5181 * @note This automatically clears DR7.GD. */
5182DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5183{
5184 /** @todo set/clear RF. */
5185 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5186 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5187}
5188
5189
5190/** \#UD - 06. */
5191DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5192{
5193 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5194}
5195
5196
5197/** \#NM - 07. */
5198DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5199{
5200 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5201}
5202
5203
5204/** \#TS(err) - 0a. */
5205DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5206{
5207 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5208}
5209
5210
5211/** \#TS(tr) - 0a. */
5212DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5213{
5214 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5215 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5216}
5217
5218
5219/** \#TS(0) - 0a. */
5220DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5221{
5222 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5223 0, 0);
5224}
5225
5226
5227/** \#TS(err) - 0a. */
5228DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5229{
5230 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5231 uSel & X86_SEL_MASK_OFF_RPL, 0);
5232}
5233
5234
5235/** \#NP(err) - 0b. */
5236DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5237{
5238 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5239}
5240
5241
5242/** \#NP(seg) - 0b. */
5243DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5244{
5245 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5246 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5247}
5248
5249
5250/** \#NP(sel) - 0b. */
5251DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5252{
5253 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5254 uSel & ~X86_SEL_RPL, 0);
5255}
5256
5257
5258/** \#SS(seg) - 0c. */
5259DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5260{
5261 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5262 uSel & ~X86_SEL_RPL, 0);
5263}
5264
5265
5266/** \#SS(err) - 0c. */
5267DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5268{
5269 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5270}
5271
5272
5273/** \#GP(n) - 0d. */
5274DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5275{
5276 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5277}
5278
5279
5280/** \#GP(0) - 0d. */
5281DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5282{
5283 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5284}
5285
5286#ifdef IEM_WITH_SETJMP
5287/** \#GP(0) - 0d. */
5288DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5289{
5290 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5291}
5292#endif
5293
5294
5295/** \#GP(sel) - 0d. */
5296DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5297{
5298 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5299 Sel & ~X86_SEL_RPL, 0);
5300}
5301
5302
5303/** \#GP(0) - 0d. */
5304DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5305{
5306 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5307}
5308
5309
5310/** \#GP(sel) - 0d. */
5311DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5312{
5313 NOREF(iSegReg); NOREF(fAccess);
5314 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5315 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5316}
5317
5318#ifdef IEM_WITH_SETJMP
5319/** \#GP(sel) - 0d, longjmp. */
5320DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5321{
5322 NOREF(iSegReg); NOREF(fAccess);
5323 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5324 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5325}
5326#endif
5327
5328/** \#GP(sel) - 0d. */
5329DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5330{
5331 NOREF(Sel);
5332 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5333}
5334
5335#ifdef IEM_WITH_SETJMP
5336/** \#GP(sel) - 0d, longjmp. */
5337DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5338{
5339 NOREF(Sel);
5340 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5341}
5342#endif
5343
5344
5345/** \#GP(sel) - 0d. */
5346DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5347{
5348 NOREF(iSegReg); NOREF(fAccess);
5349 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5350}
5351
5352#ifdef IEM_WITH_SETJMP
5353/** \#GP(sel) - 0d, longjmp. */
5354DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5355 uint32_t fAccess)
5356{
5357 NOREF(iSegReg); NOREF(fAccess);
5358 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5359}
5360#endif
5361
5362
5363/** \#PF(n) - 0e. */
5364DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5365{
5366 uint16_t uErr;
5367 switch (rc)
5368 {
5369 case VERR_PAGE_NOT_PRESENT:
5370 case VERR_PAGE_TABLE_NOT_PRESENT:
5371 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5372 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5373 uErr = 0;
5374 break;
5375
5376 default:
5377 AssertMsgFailed(("%Rrc\n", rc));
5378 case VERR_ACCESS_DENIED:
5379 uErr = X86_TRAP_PF_P;
5380 break;
5381
5382 /** @todo reserved */
5383 }
5384
5385 if (pVCpu->iem.s.uCpl == 3)
5386 uErr |= X86_TRAP_PF_US;
5387
5388 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5389 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5390 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5391 uErr |= X86_TRAP_PF_ID;
5392
5393#if 0 /* This is so much non-sense, really. Why was it done like that? */
5394 /* Note! RW access callers reporting a WRITE protection fault will clear
5395 the READ flag before calling. So, read-modify-write accesses (RW)
5396 can safely be reported as READ faults. */
5397 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5398 uErr |= X86_TRAP_PF_RW;
5399#else
5400 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5401 {
5402 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5403 uErr |= X86_TRAP_PF_RW;
5404 }
5405#endif
5406
5407 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5408 uErr, GCPtrWhere);
5409}
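/*
 * Illustrative sketch (kept under #if 0, never compiled): the error code
 * assembled above for a ring-3 write hitting a present, read-only page.
 */
#if 0
    uint16_t const uErrExample = X86_TRAP_PF_P    /* VERR_ACCESS_DENIED: page present */
                               | X86_TRAP_PF_US   /* CPL == 3 */
                               | X86_TRAP_PF_RW;  /* IEM_ACCESS_TYPE_WRITE */
    /* = 0x7.  An NX-blocked code fetch would get X86_TRAP_PF_ID instead of RW. */
#endif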
5410
5411#ifdef IEM_WITH_SETJMP
5412/** \#PF(n) - 0e, longjmp. */
5413IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5414{
5415 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5416}
5417#endif
5418
5419
5420/** \#MF(0) - 10. */
5421DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5422{
5423 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5424}
5425
5426
5427/** \#AC(0) - 11. */
5428DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5429{
5430 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5431}
5432
5433
5434/**
5435 * Macro for calling iemCImplRaiseDivideError().
5436 *
5437 * This enables us to add/remove arguments and force different levels of
5438 * inlining as we wish.
5439 *
5440 * @return Strict VBox status code.
5441 */
5442#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5443IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5444{
5445 NOREF(cbInstr);
5446 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5447}
5448
5449
5450/**
5451 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5452 *
5453 * This enables us to add/remove arguments and force different levels of
5454 * inlining as we wish.
5455 *
5456 * @return Strict VBox status code.
5457 */
5458#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5459IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5460{
5461 NOREF(cbInstr);
5462 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5463}
5464
5465
5466/**
5467 * Macro for calling iemCImplRaiseInvalidOpcode().
5468 *
5469 * This enables us to add/remove arguments and force different levels of
5470 * inlining as we wish.
5471 *
5472 * @return Strict VBox status code.
5473 */
5474#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5475IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5476{
5477 NOREF(cbInstr);
5478 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5479}
5480
5481
5482/** @} */
5483
5484
5485/*
5486 *
5487 * Helper routines.
5488 * Helper routines.
5489 * Helper routines.
5490 *
5491 */
5492
5493/**
5494 * Recalculates the effective operand size.
5495 *
5496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5497 */
5498IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5499{
5500 switch (pVCpu->iem.s.enmCpuMode)
5501 {
5502 case IEMMODE_16BIT:
5503 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5504 break;
5505 case IEMMODE_32BIT:
5506 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5507 break;
5508 case IEMMODE_64BIT:
5509 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5510 {
5511 case 0:
5512 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5513 break;
5514 case IEM_OP_PRF_SIZE_OP:
5515 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5516 break;
5517 case IEM_OP_PRF_SIZE_REX_W:
5518 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5519 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5520 break;
5521 }
5522 break;
5523 default:
5524 AssertFailed();
5525 }
5526}
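/*
 * Summary of the recalculation above (comment only):
 *   - 16-bit mode: the 0x66 prefix switches to 32-bit operands.
 *   - 32-bit mode: the 0x66 prefix switches to 16-bit operands.
 *   - 64-bit mode: REX.W forces 64-bit and takes precedence over 0x66; 0x66
 *     alone gives 16-bit; with neither prefix the default size (normally
 *     32-bit) is used.
 */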
5527
5528
5529/**
5530 * Sets the default operand size to 64-bit and recalculates the effective
5531 * operand size.
5532 *
5533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5534 */
5535IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5536{
5537 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5538 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5539 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5540 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5541 else
5542 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5543}
5544
5545
5546/*
5547 *
5548 * Common opcode decoders.
5549 * Common opcode decoders.
5550 * Common opcode decoders.
5551 *
5552 */
5553//#include <iprt/mem.h>
5554
5555/**
5556 * Used to add extra details about a stub case.
5557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5558 */
5559IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5560{
5561#if defined(LOG_ENABLED) && defined(IN_RING3)
5562 PVM pVM = pVCpu->CTX_SUFF(pVM);
5563 char szRegs[4096];
5564 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5565 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5566 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5567 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5568 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5569 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5570 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5571 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5572 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5573 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5574 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5575 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5576 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5577 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5578 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5579 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5580 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5581 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5582 " efer=%016VR{efer}\n"
5583 " pat=%016VR{pat}\n"
5584 " sf_mask=%016VR{sf_mask}\n"
5585 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5586 " lstar=%016VR{lstar}\n"
5587 " star=%016VR{star} cstar=%016VR{cstar}\n"
5588 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5589 );
5590
5591 char szInstr[256];
5592 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5593 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5594 szInstr, sizeof(szInstr), NULL);
5595
5596 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5597#else
5598 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
5599#endif
5600}
5601
5602/**
5603 * Complains about a stub.
5604 *
5605 * Two versions of this macro are provided: one for daily use and one for use
5606 * when working on IEM.
5607 */
5608#if 0
5609# define IEMOP_BITCH_ABOUT_STUB() \
5610 do { \
5611 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5612 iemOpStubMsg2(pVCpu); \
5613 RTAssertPanic(); \
5614 } while (0)
5615#else
5616# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5617#endif
5618
5619/** Stubs an opcode. */
5620#define FNIEMOP_STUB(a_Name) \
5621 FNIEMOP_DEF(a_Name) \
5622 { \
5623 IEMOP_BITCH_ABOUT_STUB(); \
5624 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5625 } \
5626 typedef int ignore_semicolon
5627
5628/** Stubs an opcode. */
5629#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5630 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5631 { \
5632 IEMOP_BITCH_ABOUT_STUB(); \
5633 NOREF(a_Name0); \
5634 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5635 } \
5636 typedef int ignore_semicolon
5637
5638/** Stubs an opcode which currently should raise \#UD. */
5639#define FNIEMOP_UD_STUB(a_Name) \
5640 FNIEMOP_DEF(a_Name) \
5641 { \
5642 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5643 return IEMOP_RAISE_INVALID_OPCODE(); \
5644 } \
5645 typedef int ignore_semicolon
5646
5647/** Stubs an opcode which currently should raise \#UD. */
5648#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5649 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5650 { \
5651 NOREF(a_Name0); \
5652 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5653 return IEMOP_RAISE_INVALID_OPCODE(); \
5654 } \
5655 typedef int ignore_semicolon
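/*
 * Illustrative sketch (kept under #if 0, never compiled) of how the stub
 * macros above are used; the opcode names are hypothetical.
 */
#if 0
FNIEMOP_STUB(iemOp_SomeUnimplementedOpcode);                  /* logs and returns VERR_IEM_INSTR_NOT_IMPLEMENTED */
FNIEMOP_UD_STUB_1(iemOp_AnotherStubbedOpcode, uint8_t, bRm);  /* raises #UD via IEMOP_RAISE_INVALID_OPCODE() */
#endif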
5656
5657
5658
5659/** @name Register Access.
5660 * @{
5661 */
5662
5663/**
5664 * Gets a reference (pointer) to the specified hidden segment register.
5665 *
5666 * @returns Hidden register reference.
5667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5668 * @param iSegReg The segment register.
5669 */
5670IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5671{
5672 Assert(iSegReg < X86_SREG_COUNT);
5673 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5674 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5675
5676#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5677 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5678 { /* likely */ }
5679 else
5680 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5681#else
5682 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5683#endif
5684 return pSReg;
5685}
5686
5687
5688/**
5689 * Ensures that the given hidden segment register is up to date.
5690 *
5691 * @returns Hidden register reference.
5692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5693 * @param pSReg The segment register.
5694 */
5695IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5696{
5697#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5698 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5699 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5700#else
5701 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5702 NOREF(pVCpu);
5703#endif
5704 return pSReg;
5705}
5706
5707
5708/**
5709 * Gets a reference (pointer) to the specified segment register (the selector
5710 * value).
5711 *
5712 * @returns Pointer to the selector variable.
5713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5714 * @param iSegReg The segment register.
5715 */
5716DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5717{
5718 Assert(iSegReg < X86_SREG_COUNT);
5719 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5720 return &pCtx->aSRegs[iSegReg].Sel;
5721}
5722
5723
5724/**
5725 * Fetches the selector value of a segment register.
5726 *
5727 * @returns The selector value.
5728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5729 * @param iSegReg The segment register.
5730 */
5731DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5732{
5733 Assert(iSegReg < X86_SREG_COUNT);
5734 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5735}
5736
5737
5738/**
5739 * Gets a reference (pointer) to the specified general purpose register.
5740 *
5741 * @returns Register reference.
5742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5743 * @param iReg The general purpose register.
5744 */
5745DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5746{
5747 Assert(iReg < 16);
5748 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5749 return &pCtx->aGRegs[iReg];
5750}
5751
5752
5753/**
5754 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5755 *
5756 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5757 *
5758 * @returns Register reference.
5759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5760 * @param iReg The register.
5761 */
5762DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5763{
5764 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5765 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5766 {
5767 Assert(iReg < 16);
5768 return &pCtx->aGRegs[iReg].u8;
5769 }
5770 /* high 8-bit register. */
5771 Assert(iReg < 8);
5772 return &pCtx->aGRegs[iReg & 3].bHi;
5773}
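/*
 * Illustrative sketch (kept under #if 0, never compiled): how the high-byte
 * registers are resolved by iemGRegRefU8.
 */
#if 0
    /* Assuming iReg == 4 from ModR/M:
         - no REX prefix in fPrefixes -> returns &aGRegs[4 & 3].bHi, i.e. AH;
         - any REX prefix present     -> returns &aGRegs[4].u8,      i.e. SPL. */
    uint8_t *pbReg = iemGRegRefU8(pVCpu, 4 /*iReg*/);
#endif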
5774
5775
5776/**
5777 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5778 *
5779 * @returns Register reference.
5780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5781 * @param iReg The register.
5782 */
5783DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5784{
5785 Assert(iReg < 16);
5786 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5787 return &pCtx->aGRegs[iReg].u16;
5788}
5789
5790
5791/**
5792 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5793 *
5794 * @returns Register reference.
5795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5796 * @param iReg The register.
5797 */
5798DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5799{
5800 Assert(iReg < 16);
5801 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5802 return &pCtx->aGRegs[iReg].u32;
5803}
5804
5805
5806/**
5807 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5808 *
5809 * @returns Register reference.
5810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5811 * @param iReg The register.
5812 */
5813DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5814{
5815 Assert(iReg < 16);
5816 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5817 return &pCtx->aGRegs[iReg].u64;
5818}
5819
5820
5821/**
5822 * Fetches the value of an 8-bit general purpose register.
5823 *
5824 * @returns The register value.
5825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5826 * @param iReg The register.
5827 */
5828DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5829{
5830 return *iemGRegRefU8(pVCpu, iReg);
5831}
5832
5833
5834/**
5835 * Fetches the value of a 16-bit general purpose register.
5836 *
5837 * @returns The register value.
5838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5839 * @param iReg The register.
5840 */
5841DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5842{
5843 Assert(iReg < 16);
5844 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5845}
5846
5847
5848/**
5849 * Fetches the value of a 32-bit general purpose register.
5850 *
5851 * @returns The register value.
5852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5853 * @param iReg The register.
5854 */
5855DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5856{
5857 Assert(iReg < 16);
5858 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5859}
5860
5861
5862/**
5863 * Fetches the value of a 64-bit general purpose register.
5864 *
5865 * @returns The register value.
5866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5867 * @param iReg The register.
5868 */
5869DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5870{
5871 Assert(iReg < 16);
5872 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5873}
5874
5875
5876/**
5877 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5878 *
5879 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5880 * segment limit.
5881 *
5882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5883 * @param offNextInstr The offset of the next instruction.
5884 */
5885IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5886{
5887 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5888 switch (pVCpu->iem.s.enmEffOpSize)
5889 {
5890 case IEMMODE_16BIT:
5891 {
5892 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5893 if ( uNewIp > pCtx->cs.u32Limit
5894 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5895 return iemRaiseGeneralProtectionFault0(pVCpu);
5896 pCtx->rip = uNewIp;
5897 break;
5898 }
5899
5900 case IEMMODE_32BIT:
5901 {
5902 Assert(pCtx->rip <= UINT32_MAX);
5903 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5904
5905 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5906 if (uNewEip > pCtx->cs.u32Limit)
5907 return iemRaiseGeneralProtectionFault0(pVCpu);
5908 pCtx->rip = uNewEip;
5909 break;
5910 }
5911
5912 case IEMMODE_64BIT:
5913 {
5914 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5915
5916 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5917 if (!IEM_IS_CANONICAL(uNewRip))
5918 return iemRaiseGeneralProtectionFault0(pVCpu);
5919 pCtx->rip = uNewRip;
5920 break;
5921 }
5922
5923 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5924 }
5925
5926 pCtx->eflags.Bits.u1RF = 0;
5927
5928#ifndef IEM_WITH_CODE_TLB
5929 /* Flush the prefetch buffer. */
5930 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5931#endif
5932
5933 return VINF_SUCCESS;
5934}
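/*
 * Worked example (comment only): with 16-bit operand size the new IP is
 * computed in a uint16_t and thus wraps at 64K before the limit check, e.g.
 * ip=0xFFFE with a 2-byte JMP rel8 and offset +4 gives 0xFFFE + 4 + 2 =
 * 0x10004, truncated to 0x0004, which is then checked against cs.u32Limit.
 */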
5935
5936
5937/**
5938 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5939 *
5940 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5941 * segment limit.
5942 *
5943 * @returns Strict VBox status code.
5944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5945 * @param offNextInstr The offset of the next instruction.
5946 */
5947IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5948{
5949 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5950 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5951
5952 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5953 if ( uNewIp > pCtx->cs.u32Limit
5954 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5955 return iemRaiseGeneralProtectionFault0(pVCpu);
5956 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5957 pCtx->rip = uNewIp;
5958 pCtx->eflags.Bits.u1RF = 0;
5959
5960#ifndef IEM_WITH_CODE_TLB
5961 /* Flush the prefetch buffer. */
5962 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5963#endif
5964
5965 return VINF_SUCCESS;
5966}
5967
5968
5969/**
5970 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5971 *
5972 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5973 * segment limit.
5974 *
5975 * @returns Strict VBox status code.
5976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5977 * @param offNextInstr The offset of the next instruction.
5978 */
5979IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
5980{
5981 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5982 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
5983
5984 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
5985 {
5986 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5987
5988 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5989 if (uNewEip > pCtx->cs.u32Limit)
5990 return iemRaiseGeneralProtectionFault0(pVCpu);
5991 pCtx->rip = uNewEip;
5992 }
5993 else
5994 {
5995 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5996
5997 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5998 if (!IEM_IS_CANONICAL(uNewRip))
5999 return iemRaiseGeneralProtectionFault0(pVCpu);
6000 pCtx->rip = uNewRip;
6001 }
6002 pCtx->eflags.Bits.u1RF = 0;
6003
6004#ifndef IEM_WITH_CODE_TLB
6005 /* Flush the prefetch buffer. */
6006 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6007#endif
6008
6009 return VINF_SUCCESS;
6010}
6011
6012
6013/**
6014 * Performs a near jump to the specified address.
6015 *
6016 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6017 * segment limit.
6018 *
6019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6020 * @param uNewRip The new RIP value.
6021 */
6022IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6023{
6024 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6025 switch (pVCpu->iem.s.enmEffOpSize)
6026 {
6027 case IEMMODE_16BIT:
6028 {
6029 Assert(uNewRip <= UINT16_MAX);
6030 if ( uNewRip > pCtx->cs.u32Limit
6031 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6032 return iemRaiseGeneralProtectionFault0(pVCpu);
6033 /** @todo Test 16-bit jump in 64-bit mode. */
6034 pCtx->rip = uNewRip;
6035 break;
6036 }
6037
6038 case IEMMODE_32BIT:
6039 {
6040 Assert(uNewRip <= UINT32_MAX);
6041 Assert(pCtx->rip <= UINT32_MAX);
6042 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6043
6044 if (uNewRip > pCtx->cs.u32Limit)
6045 return iemRaiseGeneralProtectionFault0(pVCpu);
6046 pCtx->rip = uNewRip;
6047 break;
6048 }
6049
6050 case IEMMODE_64BIT:
6051 {
6052 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6053
6054 if (!IEM_IS_CANONICAL(uNewRip))
6055 return iemRaiseGeneralProtectionFault0(pVCpu);
6056 pCtx->rip = uNewRip;
6057 break;
6058 }
6059
6060 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6061 }
6062
6063 pCtx->eflags.Bits.u1RF = 0;
6064
6065#ifndef IEM_WITH_CODE_TLB
6066 /* Flush the prefetch buffer. */
6067 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6068#endif
6069
6070 return VINF_SUCCESS;
6071}
6072
6073
6074/**
6075 * Get the address of the top of the stack.
6076 *
6077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6078 * @param pCtx The CPU context from which SP/ESP/RSP should be
6079 * read.
6080 */
6081DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6082{
6083 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6084 return pCtx->rsp;
6085 if (pCtx->ss.Attr.n.u1DefBig)
6086 return pCtx->esp;
6087 return pCtx->sp;
6088}
6089
6090
6091/**
6092 * Updates the RIP/EIP/IP to point to the next instruction.
6093 *
6094 * This function leaves the EFLAGS.RF flag alone.
6095 *
6096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6097 * @param cbInstr The number of bytes to add.
6098 */
6099IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6100{
6101 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6102 switch (pVCpu->iem.s.enmCpuMode)
6103 {
6104 case IEMMODE_16BIT:
6105 Assert(pCtx->rip <= UINT16_MAX);
6106 pCtx->eip += cbInstr;
6107 pCtx->eip &= UINT32_C(0xffff);
6108 break;
6109
6110 case IEMMODE_32BIT:
6111 pCtx->eip += cbInstr;
6112 Assert(pCtx->rip <= UINT32_MAX);
6113 break;
6114
6115 case IEMMODE_64BIT:
6116 pCtx->rip += cbInstr;
6117 break;
6118 default: AssertFailed();
6119 }
6120}
6121
6122
6123#if 0
6124/**
6125 * Updates the RIP/EIP/IP to point to the next instruction.
6126 *
6127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6128 */
6129IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6130{
6131 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6132}
6133#endif
6134
6135
6136
6137/**
6138 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6139 *
6140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6141 * @param cbInstr The number of bytes to add.
6142 */
6143IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6144{
6145 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6146
6147 pCtx->eflags.Bits.u1RF = 0;
6148
6149 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6150#if ARCH_BITS >= 64
6151 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6152 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6153 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6154#else
6155 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6156 pCtx->rip += cbInstr;
6157 else
6158 {
6159 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6160 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6161 }
6162#endif
6163}
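/*
 * Worked example (comment only): the mask table above makes the instruction
 * pointer wrap according to the CPU mode, e.g. in 16-bit mode eip=0xFFFF plus
 * a 2-byte instruction yields (0xFFFF + 2) & 0xffff = 0x0001, while in 64-bit
 * mode the full RIP is used unmasked.
 */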
6164
6165
6166/**
6167 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6168 *
6169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6170 */
6171IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6172{
6173 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6174}
6175
6176
6177/**
6178 * Adds to the stack pointer.
6179 *
6180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6181 * @param pCtx The CPU context in which SP/ESP/RSP should be
6182 * updated.
6183 * @param cbToAdd The number of bytes to add (8-bit!).
6184 */
6185DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6186{
6187 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6188 pCtx->rsp += cbToAdd;
6189 else if (pCtx->ss.Attr.n.u1DefBig)
6190 pCtx->esp += cbToAdd;
6191 else
6192 pCtx->sp += cbToAdd;
6193}
6194
6195
6196/**
6197 * Subtracts from the stack pointer.
6198 *
6199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6200 * @param pCtx The CPU context in which SP/ESP/RSP should be
6201 * updated.
6202 * @param cbToSub The number of bytes to subtract (8-bit!).
6203 */
6204DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6205{
6206 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6207 pCtx->rsp -= cbToSub;
6208 else if (pCtx->ss.Attr.n.u1DefBig)
6209 pCtx->esp -= cbToSub;
6210 else
6211 pCtx->sp -= cbToSub;
6212}
6213
6214
6215/**
6216 * Adds to the temporary stack pointer.
6217 *
6218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6219 * @param pCtx Where to get the current stack mode.
6220 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6221 * @param cbToAdd The number of bytes to add (16-bit).
6222 */
6223DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6224{
6225 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6226 pTmpRsp->u += cbToAdd;
6227 else if (pCtx->ss.Attr.n.u1DefBig)
6228 pTmpRsp->DWords.dw0 += cbToAdd;
6229 else
6230 pTmpRsp->Words.w0 += cbToAdd;
6231}
6232
6233
6234/**
6235 * Subtracts from the temporary stack pointer.
6236 *
6237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6238 * @param pCtx Where to get the current stack mode.
6239 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6240 * @param cbToSub The number of bytes to subtract.
6241 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6242 * expecting that.
6243 */
6244DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6245{
6246 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6247 pTmpRsp->u -= cbToSub;
6248 else if (pCtx->ss.Attr.n.u1DefBig)
6249 pTmpRsp->DWords.dw0 -= cbToSub;
6250 else
6251 pTmpRsp->Words.w0 -= cbToSub;
6252}
6253
6254
6255/**
6256 * Calculates the effective stack address for a push of the specified size as
6257 * well as the new RSP value (upper bits may be masked).
6258 *
6259 * @returns Effective stack address for the push.
6260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6261 * @param pCtx Where to get the current stack mode.
6262 * @param cbItem The size of the stack item to push.
6263 * @param puNewRsp Where to return the new RSP value.
6264 */
6265DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6266{
6267 RTUINT64U uTmpRsp;
6268 RTGCPTR GCPtrTop;
6269 uTmpRsp.u = pCtx->rsp;
6270
6271 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6272 GCPtrTop = uTmpRsp.u -= cbItem;
6273 else if (pCtx->ss.Attr.n.u1DefBig)
6274 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6275 else
6276 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6277 *puNewRsp = uTmpRsp.u;
6278 return GCPtrTop;
6279}
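/*
 * Illustrative sketch (kept under #if 0, never compiled): typical use of the
 * push helper above; the store step is only sketched, not a specific API call.
 */
#if 0
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, sizeof(uint32_t), &uNewRsp);
    /* ... write the dword at SS:GCPtrTop using one of the iemMemStoreData* helpers ... */
    pCtx->rsp = uNewRsp;    /* commit RSP only once the write has succeeded */
#endif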
6280
6281
6282/**
6283 * Gets the current stack pointer and calculates the value after a pop of the
6284 * specified size.
6285 *
6286 * @returns Current stack pointer.
6287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6288 * @param pCtx Where to get the current stack mode.
6289 * @param cbItem The size of the stack item to pop.
6290 * @param puNewRsp Where to return the new RSP value.
6291 */
6292DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6293{
6294 RTUINT64U uTmpRsp;
6295 RTGCPTR GCPtrTop;
6296 uTmpRsp.u = pCtx->rsp;
6297
6298 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6299 {
6300 GCPtrTop = uTmpRsp.u;
6301 uTmpRsp.u += cbItem;
6302 }
6303 else if (pCtx->ss.Attr.n.u1DefBig)
6304 {
6305 GCPtrTop = uTmpRsp.DWords.dw0;
6306 uTmpRsp.DWords.dw0 += cbItem;
6307 }
6308 else
6309 {
6310 GCPtrTop = uTmpRsp.Words.w0;
6311 uTmpRsp.Words.w0 += cbItem;
6312 }
6313 *puNewRsp = uTmpRsp.u;
6314 return GCPtrTop;
6315}
6316
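/*
 * A minimal illustrative sketch (deliberately kept out of the build) of how
 * the push/pop helpers above are meant to be used: the caller performs the
 * memory access at the returned effective address and only commits the new
 * RSP value afterwards, so a faulting access leaves RSP unchanged.  The
 * function name is hypothetical and the stack-write step is just a
 * placeholder comment, not a reference to a specific IEM memory helper.
 */
#if 0
static VBOXSTRICTRC iemExamplePushU16(PVMCPU pVCpu, uint16_t uValue)
{
    PCPUMCTX pCtx     = IEM_GET_CTX(pVCpu);
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, sizeof(uValue), &uNewRsp);

    /* ... write uValue to SS:GCPtrTop using the appropriate stack-write helper ... */
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;

    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = uNewRsp; /* Commit the masked/truncated stack pointer only on success. */
    return rcStrict;
}
#endif /* illustrative sketch */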
6317
6318/**
6319 * Calculates the effective stack address for a push of the specified size as
6320 * well as the new temporary RSP value (upper bits may be masked).
6321 *
6322 * @returns Effective stack address for the push.
6323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6324 * @param pCtx Where to get the current stack mode.
6325 * @param pTmpRsp The temporary stack pointer. This is updated.
6326 * @param cbItem The size of the stack item to push.
6327 */
6328DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6329{
6330 RTGCPTR GCPtrTop;
6331
6332 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6333 GCPtrTop = pTmpRsp->u -= cbItem;
6334 else if (pCtx->ss.Attr.n.u1DefBig)
6335 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6336 else
6337 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6338 return GCPtrTop;
6339}
6340
6341
6342/**
6343 * Gets the effective stack address for a pop of the specified size and
6344 * calculates and updates the temporary RSP.
6345 *
6346 * @returns Current stack pointer.
6347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6348 * @param pCtx Where to get the current stack mode.
6349 * @param pTmpRsp The temporary stack pointer. This is updated.
6350 * @param cbItem The size of the stack item to pop.
6351 */
6352DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6353{
6354 RTGCPTR GCPtrTop;
6355 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6356 {
6357 GCPtrTop = pTmpRsp->u;
6358 pTmpRsp->u += cbItem;
6359 }
6360 else if (pCtx->ss.Attr.n.u1DefBig)
6361 {
6362 GCPtrTop = pTmpRsp->DWords.dw0;
6363 pTmpRsp->DWords.dw0 += cbItem;
6364 }
6365 else
6366 {
6367 GCPtrTop = pTmpRsp->Words.w0;
6368 pTmpRsp->Words.w0 += cbItem;
6369 }
6370 return GCPtrTop;
6371}
6372
6373/** @} */
6374
6375
6376/** @name FPU access and helpers.
6377 *
6378 * @{
6379 */
6380
6381
6382/**
6383 * Hook for preparing to use the host FPU.
6384 *
6385 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6386 *
6387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6388 */
6389DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6390{
6391#ifdef IN_RING3
6392 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6393#else
6394 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6395#endif
6396}
6397
6398
6399/**
6400 * Hook for preparing to use the host FPU for SSE.
6401 *
6402 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6403 *
6404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6405 */
6406DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6407{
6408 iemFpuPrepareUsage(pVCpu);
6409}
6410
6411
6412/**
6413 * Hook for actualizing the guest FPU state before the interpreter reads it.
6414 *
6415 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6416 *
6417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6418 */
6419DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6420{
6421#ifdef IN_RING3
6422 NOREF(pVCpu);
6423#else
6424 CPUMRZFpuStateActualizeForRead(pVCpu);
6425#endif
6426}
6427
6428
6429/**
6430 * Hook for actualizing the guest FPU state before the interpreter changes it.
6431 *
6432 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6433 *
6434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6435 */
6436DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6437{
6438#ifdef IN_RING3
6439 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6440#else
6441 CPUMRZFpuStateActualizeForChange(pVCpu);
6442#endif
6443}
6444
6445
6446/**
6447 * Hook for actualizing the guest XMM0..15 register state for read only.
6448 *
6449 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6450 *
6451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6452 */
6453DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6454{
6455#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6456 NOREF(pVCpu);
6457#else
6458 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6459#endif
6460}
6461
6462
6463/**
6464 * Hook for actualizing the guest XMM0..15 register state for read+write.
6465 *
6466 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6467 *
6468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6469 */
6470DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6471{
6472#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6473 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6474#else
6475 CPUMRZFpuStateActualizeForChange(pVCpu);
6476#endif
6477}
6478
6479
6480/**
6481 * Stores a QNaN value into a FPU register.
6482 *
6483 * @param pReg Pointer to the register.
6484 */
6485DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6486{
6487 pReg->au32[0] = UINT32_C(0x00000000);
6488 pReg->au32[1] = UINT32_C(0xc0000000);
6489 pReg->au16[4] = UINT16_C(0xffff);
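    /* Net effect: the register holds 0xFFFF'C0000000'00000000, the x87 QNaN
       floating-point indefinite (sign set, exponent all ones, J bit and the
       top mantissa bit set). */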
6490}
6491
6492
6493/**
6494 * Updates the FOP, FPU.CS and FPUIP registers.
6495 *
6496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6497 * @param pCtx The CPU context.
6498 * @param pFpuCtx The FPU context.
6499 */
6500DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6501{
6502 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6503 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6504    /** @todo x87.CS and FPUIP need to be kept separately. */
6505 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6506 {
6507 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
6508 * happens in real mode here based on the fnsave and fnstenv images. */
6509 pFpuCtx->CS = 0;
6510 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6511 }
6512 else
6513 {
6514 pFpuCtx->CS = pCtx->cs.Sel;
6515 pFpuCtx->FPUIP = pCtx->rip;
6516 }
6517}
6518
6519
6520/**
6521 * Updates the x87.DS and FPUDP registers.
6522 *
6523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6524 * @param pCtx The CPU context.
6525 * @param pFpuCtx The FPU context.
6526 * @param iEffSeg The effective segment register.
6527 * @param GCPtrEff The effective address relative to @a iEffSeg.
6528 */
6529DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6530{
6531 RTSEL sel;
6532 switch (iEffSeg)
6533 {
6534 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6535 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6536 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6537 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6538 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6539 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6540 default:
6541 AssertMsgFailed(("%d\n", iEffSeg));
6542 sel = pCtx->ds.Sel;
6543 }
6544    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6545 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6546 {
6547 pFpuCtx->DS = 0;
6548 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6549 }
6550 else
6551 {
6552 pFpuCtx->DS = sel;
6553 pFpuCtx->FPUDP = GCPtrEff;
6554 }
6555}
6556
6557
6558/**
6559 * Rotates the stack registers in the push direction.
6560 *
6561 * @param pFpuCtx The FPU context.
6562 * @remarks This is a complete waste of time, but fxsave stores the registers in
6563 * stack order.
6564 */
6565DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6566{
6567 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6568 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6569 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6570 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6571 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6572 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6573 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6574 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6575 pFpuCtx->aRegs[0].r80 = r80Tmp;
6576}
6577
6578
6579/**
6580 * Rotates the stack registers in the pop direction.
6581 *
6582 * @param pFpuCtx The FPU context.
6583 * @remarks This is a complete waste of time, but fxsave stores the registers in
6584 * stack order.
6585 */
6586DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6587{
6588 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6589 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6590 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6591 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6592 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6593 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6594 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6595 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6596 pFpuCtx->aRegs[7].r80 = r80Tmp;
6597}
6598
6599
6600/**
6601 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6602 * exception prevents it.
6603 *
6604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6605 * @param pResult The FPU operation result to push.
6606 * @param pFpuCtx The FPU context.
6607 */
6608IEM_STATIC void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6609{
6610 /* Update FSW and bail if there are pending exceptions afterwards. */
6611 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6612 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6613 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6614 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6615 {
6616 pFpuCtx->FSW = fFsw;
6617 return;
6618 }
6619
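    /* The new TOP after a push is one below the current one; +7 is -1 modulo 8. */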
6620 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6621 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6622 {
6623 /* All is fine, push the actual value. */
6624 pFpuCtx->FTW |= RT_BIT(iNewTop);
6625 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6626 }
6627 else if (pFpuCtx->FCW & X86_FCW_IM)
6628 {
6629 /* Masked stack overflow, push QNaN. */
6630 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6631 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6632 }
6633 else
6634 {
6635 /* Raise stack overflow, don't push anything. */
6636 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6637 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6638 return;
6639 }
6640
6641 fFsw &= ~X86_FSW_TOP_MASK;
6642 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6643 pFpuCtx->FSW = fFsw;
6644
6645 iemFpuRotateStackPush(pFpuCtx);
6646}
6647
6648
6649/**
6650 * Stores a result in a FPU register and updates the FSW and FTW.
6651 *
6652 * @param pFpuCtx The FPU context.
6653 * @param pResult The result to store.
6654 * @param iStReg Which FPU register to store it in.
6655 */
6656IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6657{
6658 Assert(iStReg < 8);
6659 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6660 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6661 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6662 pFpuCtx->FTW |= RT_BIT(iReg);
6663 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6664}
6665
6666
6667/**
6668 * Only updates the FPU status word (FSW) with the result of the current
6669 * instruction.
6670 *
6671 * @param pFpuCtx The FPU context.
6672 * @param u16FSW The FSW output of the current instruction.
6673 */
6674IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6675{
6676 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6677 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6678}
6679
6680
6681/**
6682 * Pops one item off the FPU stack if no pending exception prevents it.
6683 *
6684 * @param pFpuCtx The FPU context.
6685 */
6686IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6687{
6688 /* Check pending exceptions. */
6689 uint16_t uFSW = pFpuCtx->FSW;
6690 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6691 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6692 return;
6693
6694    /* TOP++ (a pop increments TOP; adding 9 is +1 modulo 8). */
6695 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6696 uFSW &= ~X86_FSW_TOP_MASK;
6697 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6698 pFpuCtx->FSW = uFSW;
6699
6700 /* Mark the previous ST0 as empty. */
6701 iOldTop >>= X86_FSW_TOP_SHIFT;
6702 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6703
6704 /* Rotate the registers. */
6705 iemFpuRotateStackPop(pFpuCtx);
6706}
6707
6708
6709/**
6710 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6711 *
6712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6713 * @param pResult The FPU operation result to push.
6714 */
6715IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6716{
6717 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6718 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6719 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6720 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6721}
6722
6723
6724/**
6725 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6726 * and sets FPUDP and FPUDS.
6727 *
6728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6729 * @param pResult The FPU operation result to push.
6730 * @param iEffSeg The effective segment register.
6731 * @param GCPtrEff The effective address relative to @a iEffSeg.
6732 */
6733IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6734{
6735 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6736 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6737 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6738 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6739 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6740}
6741
6742
6743/**
6744 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6745 * unless a pending exception prevents it.
6746 *
6747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6748 * @param pResult The FPU operation result to store and push.
6749 */
6750IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6751{
6752 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6753 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6754 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6755
6756 /* Update FSW and bail if there are pending exceptions afterwards. */
6757 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6758 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6759 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6760 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6761 {
6762 pFpuCtx->FSW = fFsw;
6763 return;
6764 }
6765
6766 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6767 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6768 {
6769 /* All is fine, push the actual value. */
6770 pFpuCtx->FTW |= RT_BIT(iNewTop);
6771 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6772 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6773 }
6774 else if (pFpuCtx->FCW & X86_FCW_IM)
6775 {
6776 /* Masked stack overflow, push QNaN. */
6777 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6778 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6779 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6780 }
6781 else
6782 {
6783 /* Raise stack overflow, don't push anything. */
6784 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6785 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6786 return;
6787 }
6788
6789 fFsw &= ~X86_FSW_TOP_MASK;
6790 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6791 pFpuCtx->FSW = fFsw;
6792
6793 iemFpuRotateStackPush(pFpuCtx);
6794}
6795
6796
6797/**
6798 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6799 * FOP.
6800 *
6801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6802 * @param pResult The result to store.
6803 * @param iStReg Which FPU register to store it in.
6804 */
6805IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6806{
6807 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6808 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6809 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6810 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6811}
6812
6813
6814/**
6815 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6816 * FOP, and then pops the stack.
6817 *
6818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6819 * @param pResult The result to store.
6820 * @param iStReg Which FPU register to store it in.
6821 */
6822IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6823{
6824 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6825 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6826 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6827 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6828 iemFpuMaybePopOne(pFpuCtx);
6829}
6830
6831
6832/**
6833 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6834 * FPUDP, and FPUDS.
6835 *
6836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6837 * @param pResult The result to store.
6838 * @param iStReg Which FPU register to store it in.
6839 * @param iEffSeg The effective memory operand selector register.
6840 * @param GCPtrEff The effective memory operand offset.
6841 */
6842IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6843 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6844{
6845 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6846 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6847 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6848 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6849 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6850}
6851
6852
6853/**
6854 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6855 * FPUDP, and FPUDS, and then pops the stack.
6856 *
6857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6858 * @param pResult The result to store.
6859 * @param iStReg Which FPU register to store it in.
6860 * @param iEffSeg The effective memory operand selector register.
6861 * @param GCPtrEff The effective memory operand offset.
6862 */
6863IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6864 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6865{
6866 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6867 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6868 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6869 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6870 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6871 iemFpuMaybePopOne(pFpuCtx);
6872}
6873
6874
6875/**
6876 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6877 *
6878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6879 */
6880IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6881{
6882 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6883 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6884 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6885}
6886
6887
6888/**
6889 * Marks the specified stack register as free (for FFREE).
6890 *
6891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6892 * @param iStReg The register to free.
6893 */
6894IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6895{
6896 Assert(iStReg < 8);
6897 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6898 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6899 pFpuCtx->FTW &= ~RT_BIT(iReg);
6900}
6901
6902
6903/**
6904 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6905 *
6906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6907 */
6908IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6909{
6910 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6911 uint16_t uFsw = pFpuCtx->FSW;
6912 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6913 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6914 uFsw &= ~X86_FSW_TOP_MASK;
6915 uFsw |= uTop;
6916 pFpuCtx->FSW = uFsw;
6917}
6918
6919
6920/**
6921 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
6922 *
6923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6924 */
6925IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6926{
6927 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6928 uint16_t uFsw = pFpuCtx->FSW;
6929 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6930 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6931 uFsw &= ~X86_FSW_TOP_MASK;
6932 uFsw |= uTop;
6933 pFpuCtx->FSW = uFsw;
6934}
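/*
 * Note on the two helpers above: TOP is a 3-bit field, so adding 1 << SHIFT
 * respectively 7 << SHIFT and masking is simply a modulo-8 increment or
 * decrement.  That is exactly what e.g. FINCSTP/FDECSTP need: move the stack
 * top without touching the tag word or the register contents.
 */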
6935
6936
6937/**
6938 * Updates the FSW, FOP, FPUIP, and FPUCS.
6939 *
6940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6941 * @param u16FSW The FSW from the current instruction.
6942 */
6943IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6944{
6945 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6946 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6947 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6948 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6949}
6950
6951
6952/**
6953 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6954 *
6955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6956 * @param u16FSW The FSW from the current instruction.
6957 */
6958IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6959{
6960 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6961 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6962 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6963 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6964 iemFpuMaybePopOne(pFpuCtx);
6965}
6966
6967
6968/**
6969 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
6970 *
6971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6972 * @param u16FSW The FSW from the current instruction.
6973 * @param iEffSeg The effective memory operand selector register.
6974 * @param GCPtrEff The effective memory operand offset.
6975 */
6976IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6977{
6978 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6979 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6980 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6981 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6982 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6983}
6984
6985
6986/**
6987 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
6988 *
6989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6990 * @param u16FSW The FSW from the current instruction.
6991 */
6992IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
6993{
6994 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6995 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6996 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6997 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6998 iemFpuMaybePopOne(pFpuCtx);
6999 iemFpuMaybePopOne(pFpuCtx);
7000}
7001
7002
7003/**
7004 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7005 *
7006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7007 * @param u16FSW The FSW from the current instruction.
7008 * @param iEffSeg The effective memory operand selector register.
7009 * @param GCPtrEff The effective memory operand offset.
7010 */
7011IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7012{
7013 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7014 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7015 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7016 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7017 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7018 iemFpuMaybePopOne(pFpuCtx);
7019}
7020
7021
7022/**
7023 * Worker routine for raising an FPU stack underflow exception.
7024 *
7025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7026 * @param pFpuCtx The FPU context.
7027 * @param iStReg The stack register being accessed.
7028 */
7029IEM_STATIC void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
7030{
7031 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7032 if (pFpuCtx->FCW & X86_FCW_IM)
7033 {
7034 /* Masked underflow. */
7035 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7036 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7037 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7038 if (iStReg != UINT8_MAX)
7039 {
7040 pFpuCtx->FTW |= RT_BIT(iReg);
7041 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7042 }
7043 }
7044 else
7045 {
7046 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7047 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7048 }
7049}
7050
7051
7052/**
7053 * Raises a FPU stack underflow exception.
7054 *
7055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7056 * @param iStReg The destination register that should be loaded
7057 * with QNaN if \#IS is masked. Specify
7058 * UINT8_MAX if none (like for fcom).
7059 */
7060DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7061{
7062 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7063 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7064 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7065 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
7066}
7067
7068
7069DECL_NO_INLINE(IEM_STATIC, void)
7070iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7071{
7072 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7073 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7074 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7075 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7076 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
7077}
7078
7079
7080DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7081{
7082 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7083 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7084 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7085 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
7086 iemFpuMaybePopOne(pFpuCtx);
7087}
7088
7089
7090DECL_NO_INLINE(IEM_STATIC, void)
7091iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7092{
7093 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7094 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7095 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7096 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7097 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
7098 iemFpuMaybePopOne(pFpuCtx);
7099}
7100
7101
7102DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7103{
7104 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7105 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7106 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7107 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
7108 iemFpuMaybePopOne(pFpuCtx);
7109 iemFpuMaybePopOne(pFpuCtx);
7110}
7111
7112
7113DECL_NO_INLINE(IEM_STATIC, void)
7114iemFpuStackPushUnderflow(PVMCPU pVCpu)
7115{
7116 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7117 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7118 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7119
7120 if (pFpuCtx->FCW & X86_FCW_IM)
7121 {
7122        /* Masked underflow - Push QNaN. */
7123 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7124 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7125 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7126 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7127 pFpuCtx->FTW |= RT_BIT(iNewTop);
7128 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7129 iemFpuRotateStackPush(pFpuCtx);
7130 }
7131 else
7132 {
7133 /* Exception pending - don't change TOP or the register stack. */
7134 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7135 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7136 }
7137}
7138
7139
7140DECL_NO_INLINE(IEM_STATIC, void)
7141iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7142{
7143 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7144 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7145 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7146
7147 if (pFpuCtx->FCW & X86_FCW_IM)
7148 {
7149        /* Masked underflow - Push QNaN. */
7150 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7151 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7152 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7153 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7154 pFpuCtx->FTW |= RT_BIT(iNewTop);
7155 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7156 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7157 iemFpuRotateStackPush(pFpuCtx);
7158 }
7159 else
7160 {
7161 /* Exception pending - don't change TOP or the register stack. */
7162 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7163 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7164 }
7165}
7166
7167
7168/**
7169 * Worker routine for raising an FPU stack overflow exception on a push.
7170 *
7171 * @param pFpuCtx The FPU context.
7172 */
7173IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7174{
7175 if (pFpuCtx->FCW & X86_FCW_IM)
7176 {
7177 /* Masked overflow. */
7178 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7179 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7180 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7181 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7182 pFpuCtx->FTW |= RT_BIT(iNewTop);
7183 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7184 iemFpuRotateStackPush(pFpuCtx);
7185 }
7186 else
7187 {
7188 /* Exception pending - don't change TOP or the register stack. */
7189 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7190 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7191 }
7192}
7193
7194
7195/**
7196 * Raises a FPU stack overflow exception on a push.
7197 *
7198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7199 */
7200DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7201{
7202 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7203 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7204 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7205 iemFpuStackPushOverflowOnly(pFpuCtx);
7206}
7207
7208
7209/**
7210 * Raises a FPU stack overflow exception on a push with a memory operand.
7211 *
7212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7213 * @param iEffSeg The effective memory operand selector register.
7214 * @param GCPtrEff The effective memory operand offset.
7215 */
7216DECL_NO_INLINE(IEM_STATIC, void)
7217iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7218{
7219 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7220 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7221 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7222 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7223 iemFpuStackPushOverflowOnly(pFpuCtx);
7224}
7225
7226
7227IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7228{
7229 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7230 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7231 if (pFpuCtx->FTW & RT_BIT(iReg))
7232 return VINF_SUCCESS;
7233 return VERR_NOT_FOUND;
7234}
7235
7236
7237IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7238{
7239 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7240 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7241 if (pFpuCtx->FTW & RT_BIT(iReg))
7242 {
7243 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7244 return VINF_SUCCESS;
7245 }
7246 return VERR_NOT_FOUND;
7247}
7248
7249
7250IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7251 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7252{
7253 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7254 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7255 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7256 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7257 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7258 {
7259 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7260 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7261 return VINF_SUCCESS;
7262 }
7263 return VERR_NOT_FOUND;
7264}
7265
7266
7267IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7268{
7269 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7270 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7271 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7272 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7273 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7274 {
7275 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7276 return VINF_SUCCESS;
7277 }
7278 return VERR_NOT_FOUND;
7279}
7280
7281
7282/**
7283 * Updates the FPU exception status after FCW is changed.
7284 *
7285 * @param pFpuCtx The FPU context.
7286 */
7287IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7288{
7289 uint16_t u16Fsw = pFpuCtx->FSW;
7290 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7291 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7292 else
7293 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7294 pFpuCtx->FSW = u16Fsw;
7295}
7296
7297
7298/**
7299 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7300 *
7301 * @returns The full FTW.
7302 * @param pFpuCtx The FPU context.
7303 */
7304IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7305{
7306 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7307 uint16_t u16Ftw = 0;
7308 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7309 for (unsigned iSt = 0; iSt < 8; iSt++)
7310 {
7311 unsigned const iReg = (iSt + iTop) & 7;
7312 if (!(u8Ftw & RT_BIT(iReg)))
7313 u16Ftw |= 3 << (iReg * 2); /* empty */
7314 else
7315 {
7316 uint16_t uTag;
7317 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7318 if (pr80Reg->s.uExponent == 0x7fff)
7319 uTag = 2; /* Exponent is all 1's => Special. */
7320 else if (pr80Reg->s.uExponent == 0x0000)
7321 {
7322 if (pr80Reg->s.u64Mantissa == 0x0000)
7323 uTag = 1; /* All bits are zero => Zero. */
7324 else
7325 uTag = 2; /* Must be special. */
7326 }
7327 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7328 uTag = 0; /* Valid. */
7329 else
7330 uTag = 2; /* Must be special. */
7331
7332            u16Ftw |= uTag << (iReg * 2); /* valid (0), zero (1) or special (2) */
7333 }
7334 }
7335
7336 return u16Ftw;
7337}
7338
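/*
 * Illustrative summary of the encoding produced above: each physical register
 * gets a two-bit tag in the full FTW - 0 = valid, 1 = zero, 2 = special (NaN,
 * infinity, denormal/unnormal), 3 = empty.  For example, an all-empty stack
 * yields 0xFFFF, while a stack whose only occupant is 1.0 in physical
 * register 7 yields 0x3FFF.
 */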
7339
7340/**
7341 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7342 *
7343 * @returns The compressed FTW.
7344 * @param u16FullFtw The full FTW to convert.
7345 */
7346IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7347{
7348 uint8_t u8Ftw = 0;
7349 for (unsigned i = 0; i < 8; i++)
7350 {
7351 if ((u16FullFtw & 3) != 3 /*empty*/)
7352 u8Ftw |= RT_BIT(i);
7353 u16FullFtw >>= 2;
7354 }
7355
7356 return u8Ftw;
7357}
7358
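/*
 * A minimal sketch (not compiled) of how the two conversions above relate:
 * compressing keeps one bit per register, set whenever the two-bit tag is
 * anything other than 3 (empty), which is the abridged FXSAVE-style form IEM
 * keeps in pFpuCtx->FTW.  The function name is hypothetical.
 */
#if 0
static void iemExampleFtwRoundTrip(PCX86FXSTATE pFpuCtx)
{
    uint16_t const u16FullFtw     = iemFpuCalcFullFtw(pFpuCtx);
    uint16_t const u16AbridgedFtw = iemFpuCompressFtw(u16FullFtw);
    /* u16AbridgedFtw has bit i set exactly when register i is tagged
       non-empty in u16FullFtw, i.e. it should equal the low byte of the
       abridged tag word the calculation above started from. */
    NOREF(u16AbridgedFtw);
}
#endif /* illustrative sketch */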
7359/** @} */
7360
7361
7362/** @name Memory access.
7363 *
7364 * @{
7365 */
7366
7367
7368/**
7369 * Updates the IEMCPU::cbWritten counter if applicable.
7370 *
7371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7372 * @param fAccess The access being accounted for.
7373 * @param cbMem The access size.
7374 */
7375DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7376{
7377 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7378 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7379 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7380}
7381
7382
7383/**
7384 * Checks if the given segment can be written to, raising the appropriate
7385 * exception if not.
7386 *
7387 * @returns VBox strict status code.
7388 *
7389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7390 * @param pHid Pointer to the hidden register.
7391 * @param iSegReg The register number.
7392 * @param pu64BaseAddr Where to return the base address to use for the
7393 * segment. (In 64-bit code it may differ from the
7394 * base in the hidden segment.)
7395 */
7396IEM_STATIC VBOXSTRICTRC
7397iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7398{
7399 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7400 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7401 else
7402 {
7403 if (!pHid->Attr.n.u1Present)
7404 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7405
7406 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7407 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7408 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7409 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7410 *pu64BaseAddr = pHid->u64Base;
7411 }
7412 return VINF_SUCCESS;
7413}
7414
7415
7416/**
7417 * Checks if the given segment can be read from, raising the appropriate
7418 * exception if not.
7419 *
7420 * @returns VBox strict status code.
7421 *
7422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7423 * @param pHid Pointer to the hidden register.
7424 * @param iSegReg The register number.
7425 * @param pu64BaseAddr Where to return the base address to use for the
7426 * segment. (In 64-bit code it may differ from the
7427 * base in the hidden segment.)
7428 */
7429IEM_STATIC VBOXSTRICTRC
7430iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7431{
7432 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7433 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7434 else
7435 {
7436 if (!pHid->Attr.n.u1Present)
7437 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7438
7439 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7440 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7441 *pu64BaseAddr = pHid->u64Base;
7442 }
7443 return VINF_SUCCESS;
7444}
7445
7446
7447/**
7448 * Applies the segment limit, base and attributes.
7449 *
7450 * This may raise a \#GP or \#SS.
7451 *
7452 * @returns VBox strict status code.
7453 *
7454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7455 * @param fAccess The kind of access which is being performed.
7456 * @param iSegReg The index of the segment register to apply.
7457 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7458 * TSS, ++).
7459 * @param cbMem The access size.
7460 * @param pGCPtrMem Pointer to the guest memory address to apply
7461 * segmentation to. Input and output parameter.
7462 */
7463IEM_STATIC VBOXSTRICTRC
7464iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7465{
7466 if (iSegReg == UINT8_MAX)
7467 return VINF_SUCCESS;
7468
7469 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7470 switch (pVCpu->iem.s.enmCpuMode)
7471 {
7472 case IEMMODE_16BIT:
7473 case IEMMODE_32BIT:
7474 {
7475 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7476 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7477
7478 if ( pSel->Attr.n.u1Present
7479 && !pSel->Attr.n.u1Unusable)
7480 {
7481 Assert(pSel->Attr.n.u1DescType);
7482 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7483 {
7484 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7485 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7486 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7487
7488 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7489 {
7490 /** @todo CPL check. */
7491 }
7492
7493 /*
7494 * There are two kinds of data selectors, normal and expand down.
7495 */
7496 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7497 {
7498 if ( GCPtrFirst32 > pSel->u32Limit
7499 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7500 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7501 }
7502 else
7503 {
7504 /*
7505 * The upper boundary is defined by the B bit, not the G bit!
7506 */
7507 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7508 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7509 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7510 }
7511 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7512 }
7513 else
7514 {
7515
7516 /*
7517                     * A code selector can usually be used to read through; writing is
7518                     * only permitted in real and V8086 mode.
7519 */
7520 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7521 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7522 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7523 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7524 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7525
7526 if ( GCPtrFirst32 > pSel->u32Limit
7527 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7528 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7529
7530 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7531 {
7532 /** @todo CPL check. */
7533 }
7534
7535 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7536 }
7537 }
7538 else
7539 return iemRaiseGeneralProtectionFault0(pVCpu);
7540 return VINF_SUCCESS;
7541 }
7542
7543 case IEMMODE_64BIT:
7544 {
7545 RTGCPTR GCPtrMem = *pGCPtrMem;
7546 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7547 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7548
7549 Assert(cbMem >= 1);
7550 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7551 return VINF_SUCCESS;
7552 return iemRaiseGeneralProtectionFault0(pVCpu);
7553 }
7554
7555 default:
7556 AssertFailedReturn(VERR_IEM_IPE_7);
7557 }
7558}
7559
7560
7561/**
7562 * Translates a virtual address to a physical address and checks if we
7563 * can access the page as specified.
7564 *
7565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7566 * @param GCPtrMem The virtual address.
7567 * @param fAccess The intended access.
7568 * @param pGCPhysMem Where to return the physical address.
7569 */
7570IEM_STATIC VBOXSTRICTRC
7571iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7572{
7573 /** @todo Need a different PGM interface here. We're currently using
7574     * generic / REM interfaces. This won't cut it for R0 & RC. */
7575 RTGCPHYS GCPhys;
7576 uint64_t fFlags;
7577 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7578 if (RT_FAILURE(rc))
7579 {
7580 /** @todo Check unassigned memory in unpaged mode. */
7581 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7582 *pGCPhysMem = NIL_RTGCPHYS;
7583 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7584 }
7585
7586 /* If the page is writable and does not have the no-exec bit set, all
7587 access is allowed. Otherwise we'll have to check more carefully... */
7588 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7589 {
7590 /* Write to read only memory? */
7591 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7592 && !(fFlags & X86_PTE_RW)
7593 && ( pVCpu->iem.s.uCpl != 0
7594 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7595 {
7596 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7597 *pGCPhysMem = NIL_RTGCPHYS;
7598 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7599 }
7600
7601 /* Kernel memory accessed by userland? */
7602 if ( !(fFlags & X86_PTE_US)
7603 && pVCpu->iem.s.uCpl == 3
7604 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7605 {
7606 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7607 *pGCPhysMem = NIL_RTGCPHYS;
7608 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7609 }
7610
7611 /* Executing non-executable memory? */
7612 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7613 && (fFlags & X86_PTE_PAE_NX)
7614 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7615 {
7616 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7617 *pGCPhysMem = NIL_RTGCPHYS;
7618 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7619 VERR_ACCESS_DENIED);
7620 }
7621 }
7622
7623 /*
7624 * Set the dirty / access flags.
7625     * ASSUMES this is set when the address is translated rather than on commit...
7626 */
7627 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7628 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7629 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7630 {
7631 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7632 AssertRC(rc2);
7633 }
7634
7635 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7636 *pGCPhysMem = GCPhys;
7637 return VINF_SUCCESS;
7638}
7639
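/*
 * A minimal illustrative sketch (kept out of the build) of the intended
 * ordering of the two checks above for a simple, non-page-crossing data read:
 * segmentation first (turning an effective address into a linear one), then
 * the page-table walk and access check.  Real accesses go through the
 * mapping/bounce-buffer machinery further down; the function name here is
 * purely hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleVirtToPhysForRead(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                                size_t cbMem, PRTGCPHYS pGCPhysMem)
{
    /* Apply segment base, limit and attribute checks; GCPtrMem becomes linear. */
    VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, iSegReg, cbMem, &GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Walk the guest page tables and verify the access is permitted. */
    return iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_DATA_R, pGCPhysMem);
}
#endif /* illustrative sketch */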
7640
7641
7642/**
7643 * Maps a physical page.
7644 *
7645 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7647 * @param GCPhysMem The physical address.
7648 * @param fAccess The intended access.
7649 * @param ppvMem Where to return the mapping address.
7650 * @param pLock The PGM lock.
7651 */
7652IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7653{
7654#ifdef IEM_VERIFICATION_MODE_FULL
7655 /* Force the alternative path so we can ignore writes. */
7656 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7657 {
7658 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7659 {
7660 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7661 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7662 if (RT_FAILURE(rc2))
7663 pVCpu->iem.s.fProblematicMemory = true;
7664 }
7665 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7666 }
7667#endif
7668#ifdef IEM_LOG_MEMORY_WRITES
7669 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7670 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7671#endif
7672#ifdef IEM_VERIFICATION_MODE_MINIMAL
7673 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7674#endif
7675
7676    /** @todo This API may require some improvement later. A private deal with PGM
7677     * regarding locking and unlocking needs to be struck. A couple of TLBs
7678 * living in PGM, but with publicly accessible inlined access methods
7679 * could perhaps be an even better solution. */
7680 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7681 GCPhysMem,
7682 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7683 pVCpu->iem.s.fBypassHandlers,
7684 ppvMem,
7685 pLock);
7686 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7687 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7688
7689#ifdef IEM_VERIFICATION_MODE_FULL
7690 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7691 pVCpu->iem.s.fProblematicMemory = true;
7692#endif
7693 return rc;
7694}
7695
7696
7697/**
7698 * Unmap a page previously mapped by iemMemPageMap.
7699 *
7700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7701 * @param GCPhysMem The physical address.
7702 * @param fAccess The intended access.
7703 * @param pvMem What iemMemPageMap returned.
7704 * @param pLock The PGM lock.
7705 */
7706DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7707{
7708 NOREF(pVCpu);
7709 NOREF(GCPhysMem);
7710 NOREF(fAccess);
7711 NOREF(pvMem);
7712 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7713}
7714
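/*
 * A minimal sketch (not compiled) of the map/unmap pairing above; the real
 * callers are the mapping and bounce-buffer routines below, this only shows
 * the raw contract of the two helpers.  The function name is hypothetical.
 */
#if 0
static int iemExamplePeekPhysPage(PVMCPU pVCpu, RTGCPHYS GCPhysMem)
{
    void           *pvMem;
    PGMPAGEMAPLOCK  Lock;
    int rc = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, &pvMem, &Lock);
    if (RT_SUCCESS(rc))
    {
        /* ... read whatever is needed through pvMem ... */
        iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, pvMem, &Lock);
    }
    return rc;
}
#endif /* illustrative sketch */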
7715
7716/**
7717 * Looks up a memory mapping entry.
7718 *
7719 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7721 * @param pvMem The memory address.
7722 * @param fAccess The access type to look up.
7723 */
7724DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7725{
7726 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7727 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7728 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7729 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7730 return 0;
7731 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7732 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7733 return 1;
7734 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7735 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7736 return 2;
7737 return VERR_NOT_FOUND;
7738}
7739
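/*
 * Illustrative usage sketch (not compiled) for iemMapLookup: unmap/commit
 * code translates the (pointer, access) pair back into a mapping index and
 * treats a negative return as "not one of our active mappings".  The
 * function name is hypothetical.
 */
#if 0
static void iemExampleLookupMapping(PVMCPU pVCpu, void *pvMem)
{
    int iMemMap = iemMapLookup(pVCpu, pvMem, IEM_ACCESS_DATA_W);
    if (iMemMap >= 0)
    {
        /* ... commit and/or release pVCpu->iem.s.aMemMappings[iMemMap] ... */
    }
    else
        AssertFailed(); /* Not an active IEM mapping with that access type. */
}
#endif /* illustrative sketch */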
7740
7741/**
7742 * Finds a free memmap entry when using iNextMapping doesn't work.
7743 *
7744 * @returns Memory mapping index, 1024 on failure.
7745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7746 */
7747IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7748{
7749 /*
7750 * The easy case.
7751 */
7752 if (pVCpu->iem.s.cActiveMappings == 0)
7753 {
7754 pVCpu->iem.s.iNextMapping = 1;
7755 return 0;
7756 }
7757
7758 /* There should be enough mappings for all instructions. */
7759 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7760
7761 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7762 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7763 return i;
7764
7765 AssertFailedReturn(1024);
7766}
7767
7768
7769/**
7770 * Commits a bounce buffer that needs writing back and unmaps it.
7771 *
7772 * @returns Strict VBox status code.
7773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7774 * @param iMemMap The index of the buffer to commit.
7775 * @param fPostponeFail Whether we can postpone write failures to ring-3.
7776 * Always false in ring-3, obviously.
7777 */
7778IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7779{
7780 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7781 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7782#ifdef IN_RING3
7783 Assert(!fPostponeFail);
7784#endif
7785
7786 /*
7787 * Do the writing.
7788 */
7789#ifndef IEM_VERIFICATION_MODE_MINIMAL
7790 PVM pVM = pVCpu->CTX_SUFF(pVM);
7791 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7792 && !IEM_VERIFICATION_ENABLED(pVCpu))
7793 {
7794 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7795 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7796 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7797 if (!pVCpu->iem.s.fBypassHandlers)
7798 {
7799 /*
7800 * Carefully and efficiently dealing with access handler return
7801             * codes makes this a little bloated.
7802 */
7803 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7804 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7805 pbBuf,
7806 cbFirst,
7807 PGMACCESSORIGIN_IEM);
7808 if (rcStrict == VINF_SUCCESS)
7809 {
7810 if (cbSecond)
7811 {
7812 rcStrict = PGMPhysWrite(pVM,
7813 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7814 pbBuf + cbFirst,
7815 cbSecond,
7816 PGMACCESSORIGIN_IEM);
7817 if (rcStrict == VINF_SUCCESS)
7818 { /* nothing */ }
7819 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7820 {
7821 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7822 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7824 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7825 }
7826# ifndef IN_RING3
7827 else if (fPostponeFail)
7828 {
7829 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7830 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7831 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7832 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7833 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7834 return iemSetPassUpStatus(pVCpu, rcStrict);
7835 }
7836# endif
7837 else
7838 {
7839 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7840 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7841 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7842 return rcStrict;
7843 }
7844 }
7845 }
7846 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7847 {
7848 if (!cbSecond)
7849 {
7850 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7851 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7852 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7853 }
7854 else
7855 {
7856 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7857 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7858 pbBuf + cbFirst,
7859 cbSecond,
7860 PGMACCESSORIGIN_IEM);
7861 if (rcStrict2 == VINF_SUCCESS)
7862 {
7863 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7864 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7865 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7866 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7867 }
7868 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7869 {
7870 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7871 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7872 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7873 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7874 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7875 }
7876# ifndef IN_RING3
7877 else if (fPostponeFail)
7878 {
7879 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7880 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7881 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7882 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7883 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7884 return iemSetPassUpStatus(pVCpu, rcStrict);
7885 }
7886# endif
7887 else
7888 {
7889 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7890 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7891 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7892 return rcStrict2;
7893 }
7894 }
7895 }
7896# ifndef IN_RING3
7897 else if (fPostponeFail)
7898 {
7899 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7900 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7901 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7902 if (!cbSecond)
7903 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7904 else
7905 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7906 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7907 return iemSetPassUpStatus(pVCpu, rcStrict);
7908 }
7909# endif
7910 else
7911 {
7912 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7913 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7914 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7915 return rcStrict;
7916 }
7917 }
7918 else
7919 {
7920 /*
7921 * No access handlers, much simpler.
7922 */
7923 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7924 if (RT_SUCCESS(rc))
7925 {
7926 if (cbSecond)
7927 {
7928 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7929 if (RT_SUCCESS(rc))
7930 { /* likely */ }
7931 else
7932 {
7933 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7934 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7935 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7936 return rc;
7937 }
7938 }
7939 }
7940 else
7941 {
7942 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7943 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7944 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7945 return rc;
7946 }
7947 }
7948 }
7949#endif
7950
7951#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7952 /*
7953 * Record the write(s).
7954 */
7955 if (!pVCpu->iem.s.fNoRem)
7956 {
7957 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7958 if (pEvtRec)
7959 {
7960 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7961 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7962 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7963 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7964 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7965 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7966 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7967 }
7968 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7969 {
7970 pEvtRec = iemVerifyAllocRecord(pVCpu);
7971 if (pEvtRec)
7972 {
7973 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7974 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
7975 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7976 memcpy(pEvtRec->u.RamWrite.ab,
7977 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
7978 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
7979 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7980 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7981 }
7982 }
7983 }
7984#endif
7985#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
7986 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7987 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
7988 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7989 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7990 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
7991 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
7992
7993 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7994 g_cbIemWrote = cbWrote;
7995 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
7996#endif
7997
7998 /*
7999 * Free the mapping entry.
8000 */
8001 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8002 Assert(pVCpu->iem.s.cActiveMappings != 0);
8003 pVCpu->iem.s.cActiveMappings--;
8004 return VINF_SUCCESS;
8005}
8006
8007
8008/**
8009 * iemMemMap worker that deals with a request crossing pages.
8010 */
8011IEM_STATIC VBOXSTRICTRC
8012iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8013{
8014 /*
8015 * Do the address translations.
8016 */
8017 RTGCPHYS GCPhysFirst;
8018 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8019 if (rcStrict != VINF_SUCCESS)
8020 return rcStrict;
8021
8022 RTGCPHYS GCPhysSecond;
8023 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8024 fAccess, &GCPhysSecond);
8025 if (rcStrict != VINF_SUCCESS)
8026 return rcStrict;
8027 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8028
8029 PVM pVM = pVCpu->CTX_SUFF(pVM);
8030#ifdef IEM_VERIFICATION_MODE_FULL
8031 /*
8032 * Detect problematic memory when verifying so we can select
8033 * the right execution engine. (TLB: Redo this.)
8034 */
8035 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8036 {
8037 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8038 if (RT_SUCCESS(rc2))
8039 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8040 if (RT_FAILURE(rc2))
8041 pVCpu->iem.s.fProblematicMemory = true;
8042 }
8043#endif
8044
8045
8046 /*
8047 * Read in the current memory content if it's a read, execute or partial
8048 * write access.
8049 */
8050 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8051 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8052 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8053
8054 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8055 {
8056 if (!pVCpu->iem.s.fBypassHandlers)
8057 {
8058 /*
8059 * Must carefully deal with access handler status codes here,
8060 * makes the code a bit bloated.
8061 */
8062 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8063 if (rcStrict == VINF_SUCCESS)
8064 {
8065 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8066 if (rcStrict == VINF_SUCCESS)
8067 { /*likely */ }
8068 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8069 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8070 else
8071 {
8072 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8073 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8074 return rcStrict;
8075 }
8076 }
8077 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8078 {
8079 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8080 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8081 {
8082 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8083 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8084 }
8085 else
8086 {
8087 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8088 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8089 return rcStrict2;
8090 }
8091 }
8092 else
8093 {
8094 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8095 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8096 return rcStrict;
8097 }
8098 }
8099 else
8100 {
8101 /*
8102 * No informational status codes here, much more straightforward.
8103 */
8104 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8105 if (RT_SUCCESS(rc))
8106 {
8107 Assert(rc == VINF_SUCCESS);
8108 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8109 if (RT_SUCCESS(rc))
8110 Assert(rc == VINF_SUCCESS);
8111 else
8112 {
8113 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8114 return rc;
8115 }
8116 }
8117 else
8118 {
8119 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8120 return rc;
8121 }
8122 }
8123
8124#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8125 if ( !pVCpu->iem.s.fNoRem
8126 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8127 {
8128 /*
8129 * Record the reads.
8130 */
8131 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8132 if (pEvtRec)
8133 {
8134 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8135 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8136 pEvtRec->u.RamRead.cb = cbFirstPage;
8137 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8138 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8139 }
8140 pEvtRec = iemVerifyAllocRecord(pVCpu);
8141 if (pEvtRec)
8142 {
8143 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8144 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8145 pEvtRec->u.RamRead.cb = cbSecondPage;
8146 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8147 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8148 }
8149 }
8150#endif
8151 }
8152#ifdef VBOX_STRICT
8153 else
8154 memset(pbBuf, 0xcc, cbMem);
8155 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8156 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8157#endif
8158
8159 /*
8160 * Commit the bounce buffer entry.
8161 */
8162 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8163 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8164 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8165 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8166 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8167 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8168 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8169 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8170 pVCpu->iem.s.cActiveMappings++;
8171
8172 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8173 *ppvMem = pbBuf;
8174 return VINF_SUCCESS;
8175}
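
/*
 * Worked example, for illustration only (not part of the original source):
 * a 4-byte read whose page offset is 0xffe is split as
 *     cbFirstPage  = PAGE_SIZE - 0xffe       = 2
 *     cbSecondPage = cbMem     - cbFirstPage = 2
 * so bytes 0..1 of the bounce buffer are read from GCPhysFirst and bytes
 * 2..3 from GCPhysSecond, giving the caller one contiguous buffer.
 */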
8176
8177
8178/**
8179 * iemMemMap worker that deals with iemMemPageMap failures.
8180 */
8181IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8182 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8183{
8184 /*
8185 * Filter out conditions we can handle and the ones which shouldn't happen.
8186 */
8187 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8188 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8189 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8190 {
8191 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8192 return rcMap;
8193 }
8194 pVCpu->iem.s.cPotentialExits++;
8195
8196 /*
8197 * Read in the current memory content if it's a read, execute or partial
8198 * write access.
8199 */
8200 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8201 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8202 {
8203 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8204 memset(pbBuf, 0xff, cbMem);
8205 else
8206 {
8207 int rc;
8208 if (!pVCpu->iem.s.fBypassHandlers)
8209 {
8210 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8211 if (rcStrict == VINF_SUCCESS)
8212 { /* nothing */ }
8213 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8214 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8215 else
8216 {
8217 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8218 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8219 return rcStrict;
8220 }
8221 }
8222 else
8223 {
8224 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8225 if (RT_SUCCESS(rc))
8226 { /* likely */ }
8227 else
8228 {
8229 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8230 GCPhysFirst, rc));
8231 return rc;
8232 }
8233 }
8234 }
8235
8236#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8237 if ( !pVCpu->iem.s.fNoRem
8238 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8239 {
8240 /*
8241 * Record the read.
8242 */
8243 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8244 if (pEvtRec)
8245 {
8246 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8247 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8248 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8249 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8250 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8251 }
8252 }
8253#endif
8254 }
8255#ifdef VBOX_STRICT
8256 else
8257 memset(pbBuf, 0xcc, cbMem);
8258#endif
8259#ifdef VBOX_STRICT
8260 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8261 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8262#endif
8263
8264 /*
8265 * Commit the bounce buffer entry.
8266 */
8267 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8268 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8269 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8270 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8271 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8272 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8273 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8274 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8275 pVCpu->iem.s.cActiveMappings++;
8276
8277 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8278 *ppvMem = pbBuf;
8279 return VINF_SUCCESS;
8280}
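
/*
 * Illustrative note (not part of the original source): when rcMap is
 * VERR_PGM_PHYS_TLB_UNASSIGNED the read path above fills the bounce buffer
 * with 0xff bytes - an assumption about what reads of unbacked physical
 * address space should return - and fUnassigned is recorded in the bounce
 * buffer mapping entry.
 */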
8281
8282
8283
8284/**
8285 * Maps the specified guest memory for the given kind of access.
8286 *
8287 * This may be using bounce buffering of the memory if it's crossing a page
8288 * boundary or if there is an access handler installed for any of it. Because
8289 * of lock prefix guarantees, we're in for some extra clutter when this
8290 * happens.
8291 *
8292 * This may raise a \#GP, \#SS, \#PF or \#AC.
8293 *
8294 * @returns VBox strict status code.
8295 *
8296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8297 * @param ppvMem Where to return the pointer to the mapped
8298 * memory.
8299 * @param cbMem The number of bytes to map. This is usually 1,
8300 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8301 * string operations it can be up to a page.
8302 * @param iSegReg The index of the segment register to use for
8303 * this access. The base and limits are checked.
8304 * Use UINT8_MAX to indicate that no segmentation
8305 * is required (for IDT, GDT and LDT accesses).
8306 * @param GCPtrMem The address of the guest memory.
8307 * @param fAccess How the memory is being accessed. The
8308 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8309 * how to map the memory, while the
8310 * IEM_ACCESS_WHAT_XXX bit is used when raising
8311 * exceptions.
8312 */
8313IEM_STATIC VBOXSTRICTRC
8314iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8315{
8316 /*
8317 * Check the input and figure out which mapping entry to use.
8318 */
8319 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8320 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8321 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8322
8323 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8324 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8325 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8326 {
8327 iMemMap = iemMemMapFindFree(pVCpu);
8328 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8329 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8330 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8331 pVCpu->iem.s.aMemMappings[2].fAccess),
8332 VERR_IEM_IPE_9);
8333 }
8334
8335 /*
8336 * Map the memory, checking that we can actually access it. If something
8337 * slightly complicated happens, fall back on bounce buffering.
8338 */
8339 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8340 if (rcStrict != VINF_SUCCESS)
8341 return rcStrict;
8342
8343 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8344 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8345
8346 RTGCPHYS GCPhysFirst;
8347 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8348 if (rcStrict != VINF_SUCCESS)
8349 return rcStrict;
8350
8351 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8352 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8353 if (fAccess & IEM_ACCESS_TYPE_READ)
8354 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8355
8356 void *pvMem;
8357 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8358 if (rcStrict != VINF_SUCCESS)
8359 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8360
8361 /*
8362 * Fill in the mapping table entry.
8363 */
8364 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8365 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8366 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8367 pVCpu->iem.s.cActiveMappings++;
8368
8369 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8370 *ppvMem = pvMem;
8371 return VINF_SUCCESS;
8372}
8373
8374
8375/**
8376 * Commits the guest memory if bounce buffered and unmaps it.
8377 *
8378 * @returns Strict VBox status code.
8379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8380 * @param pvMem The mapping.
8381 * @param fAccess The kind of access.
8382 */
8383IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8384{
8385 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8386 AssertReturn(iMemMap >= 0, iMemMap);
8387
8388 /* If it's bounce buffered, we may need to write back the buffer. */
8389 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8390 {
8391 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8392 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8393 }
8394 /* Otherwise unlock it. */
8395 else
8396 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8397
8398 /* Free the entry. */
8399 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8400 Assert(pVCpu->iem.s.cActiveMappings != 0);
8401 pVCpu->iem.s.cActiveMappings--;
8402 return VINF_SUCCESS;
8403}
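
/*
 * Illustrative usage sketch (not part of the original source): the typical
 * map / modify / commit pattern the iemMemStoreDataUxx helpers further down
 * follow.  The helper name below is made up for the example.
 */
#if 0 /* example only */
IEM_STATIC VBOXSTRICTRC iemExampleStoreU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
{
    uint16_t *pu16Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16Dst = u16Value;                    /* write through the mapping (direct page or bounce buffer) */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W); /* commits bounce buffered data */
    }
    return rcStrict;
}
#endif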
8404
8405#ifdef IEM_WITH_SETJMP
8406
8407/**
8408 * Maps the specified guest memory for the given kind of access, longjmp on
8409 * error.
8410 *
8411 * This may be using bounce buffering of the memory if it's crossing a page
8412 * boundary or if there is an access handler installed for any of it. Because
8413 * of lock prefix guarantees, we're in for some extra clutter when this
8414 * happens.
8415 *
8416 * This may raise a \#GP, \#SS, \#PF or \#AC.
8417 *
8418 * @returns Pointer to the mapped memory.
8419 *
8420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8421 * @param cbMem The number of bytes to map. This is usually 1,
8422 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8423 * string operations it can be up to a page.
8424 * @param iSegReg The index of the segment register to use for
8425 * this access. The base and limits are checked.
8426 * Use UINT8_MAX to indicate that no segmentation
8427 * is required (for IDT, GDT and LDT accesses).
8428 * @param GCPtrMem The address of the guest memory.
8429 * @param fAccess How the memory is being accessed. The
8430 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8431 * how to map the memory, while the
8432 * IEM_ACCESS_WHAT_XXX bit is used when raising
8433 * exceptions.
8434 */
8435IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8436{
8437 /*
8438 * Check the input and figure out which mapping entry to use.
8439 */
8440 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8441 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8442 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8443
8444 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8445 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8446 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8447 {
8448 iMemMap = iemMemMapFindFree(pVCpu);
8449 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8450 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8451 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8452 pVCpu->iem.s.aMemMappings[2].fAccess),
8453 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8454 }
8455
8456 /*
8457 * Map the memory, checking that we can actually access it. If something
8458 * slightly complicated happens, fall back on bounce buffering.
8459 */
8460 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8461 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8462 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8463
8464 /* Crossing a page boundary? */
8465 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8466 { /* No (likely). */ }
8467 else
8468 {
8469 void *pvMem;
8470 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8471 if (rcStrict == VINF_SUCCESS)
8472 return pvMem;
8473 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8474 }
8475
8476 RTGCPHYS GCPhysFirst;
8477 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8478 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8479 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8480
8481 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8482 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8483 if (fAccess & IEM_ACCESS_TYPE_READ)
8484 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8485
8486 void *pvMem;
8487 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8488 if (rcStrict == VINF_SUCCESS)
8489 { /* likely */ }
8490 else
8491 {
8492 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8493 if (rcStrict == VINF_SUCCESS)
8494 return pvMem;
8495 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8496 }
8497
8498 /*
8499 * Fill in the mapping table entry.
8500 */
8501 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8502 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8503 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8504 pVCpu->iem.s.cActiveMappings++;
8505
8506 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8507 return pvMem;
8508}
8509
8510
8511/**
8512 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8513 *
8514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8515 * @param pvMem The mapping.
8516 * @param fAccess The kind of access.
8517 */
8518IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8519{
8520 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8521 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8522
8523 /* If it's bounce buffered, we may need to write back the buffer. */
8524 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8525 {
8526 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8527 {
8528 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8529 if (rcStrict == VINF_SUCCESS)
8530 return;
8531 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8532 }
8533 }
8534 /* Otherwise unlock it. */
8535 else
8536 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8537
8538 /* Free the entry. */
8539 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8540 Assert(pVCpu->iem.s.cActiveMappings != 0);
8541 pVCpu->iem.s.cActiveMappings--;
8542}
8543
8544#endif
8545
8546#ifndef IN_RING3
8547/**
8548 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8549 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
8550 *
8551 * Allows the instruction to be completed and retired, while the IEM user will
8552 * return to ring-3 immediately afterwards and do the postponed writes there.
8553 *
8554 * @returns VBox status code (no strict statuses). Caller must check
8555 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8557 * @param pvMem The mapping.
8558 * @param fAccess The kind of access.
8559 */
8560IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8561{
8562 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8563 AssertReturn(iMemMap >= 0, iMemMap);
8564
8565 /* If it's bounce buffered, we may need to write back the buffer. */
8566 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8567 {
8568 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8569 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8570 }
8571 /* Otherwise unlock it. */
8572 else
8573 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8574
8575 /* Free the entry. */
8576 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8577 Assert(pVCpu->iem.s.cActiveMappings != 0);
8578 pVCpu->iem.s.cActiveMappings--;
8579 return VINF_SUCCESS;
8580}
8581#endif
8582
8583
8584/**
8585 * Rolls back mappings, releasing page locks and such.
8586 *
8587 * The caller shall only call this after checking cActiveMappings.
8588 *
8590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8591 */
8592IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8593{
8594 Assert(pVCpu->iem.s.cActiveMappings > 0);
8595
8596 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8597 while (iMemMap-- > 0)
8598 {
8599 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8600 if (fAccess != IEM_ACCESS_INVALID)
8601 {
8602 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8603 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8604 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8605 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8606 Assert(pVCpu->iem.s.cActiveMappings > 0);
8607 pVCpu->iem.s.cActiveMappings--;
8608 }
8609 }
8610}
8611
8612
8613/**
8614 * Fetches a data byte.
8615 *
8616 * @returns Strict VBox status code.
8617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8618 * @param pu8Dst Where to return the byte.
8619 * @param iSegReg The index of the segment register to use for
8620 * this access. The base and limits are checked.
8621 * @param GCPtrMem The address of the guest memory.
8622 */
8623IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8624{
8625 /* The lazy approach for now... */
8626 uint8_t const *pu8Src;
8627 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8628 if (rc == VINF_SUCCESS)
8629 {
8630 *pu8Dst = *pu8Src;
8631 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8632 }
8633 return rc;
8634}
8635
8636
8637#ifdef IEM_WITH_SETJMP
8638/**
8639 * Fetches a data byte, longjmp on error.
8640 *
8641 * @returns The byte.
8642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8643 * @param iSegReg The index of the segment register to use for
8644 * this access. The base and limits are checked.
8645 * @param GCPtrMem The address of the guest memory.
8646 */
8647DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8648{
8649 /* The lazy approach for now... */
8650 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8651 uint8_t const bRet = *pu8Src;
8652 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8653 return bRet;
8654}
8655#endif /* IEM_WITH_SETJMP */
8656
8657
8658/**
8659 * Fetches a data word.
8660 *
8661 * @returns Strict VBox status code.
8662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8663 * @param pu16Dst Where to return the word.
8664 * @param iSegReg The index of the segment register to use for
8665 * this access. The base and limits are checked.
8666 * @param GCPtrMem The address of the guest memory.
8667 */
8668IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8669{
8670 /* The lazy approach for now... */
8671 uint16_t const *pu16Src;
8672 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8673 if (rc == VINF_SUCCESS)
8674 {
8675 *pu16Dst = *pu16Src;
8676 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8677 }
8678 return rc;
8679}
8680
8681
8682#ifdef IEM_WITH_SETJMP
8683/**
8684 * Fetches a data word, longjmp on error.
8685 *
8686 * @returns The word.
8687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8688 * @param iSegReg The index of the segment register to use for
8689 * this access. The base and limits are checked.
8690 * @param GCPtrMem The address of the guest memory.
8691 */
8692DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8693{
8694 /* The lazy approach for now... */
8695 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8696 uint16_t const u16Ret = *pu16Src;
8697 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8698 return u16Ret;
8699}
8700#endif
8701
8702
8703/**
8704 * Fetches a data dword.
8705 *
8706 * @returns Strict VBox status code.
8707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8708 * @param pu32Dst Where to return the dword.
8709 * @param iSegReg The index of the segment register to use for
8710 * this access. The base and limits are checked.
8711 * @param GCPtrMem The address of the guest memory.
8712 */
8713IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8714{
8715 /* The lazy approach for now... */
8716 uint32_t const *pu32Src;
8717 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8718 if (rc == VINF_SUCCESS)
8719 {
8720 *pu32Dst = *pu32Src;
8721 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8722 }
8723 return rc;
8724}
8725
8726
8727#ifdef IEM_WITH_SETJMP
8728
8729IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8730{
8731 Assert(cbMem >= 1);
8732 Assert(iSegReg < X86_SREG_COUNT);
8733
8734 /*
8735 * 64-bit mode is simpler.
8736 */
8737 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8738 {
8739 if (iSegReg >= X86_SREG_FS)
8740 {
8741 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8742 GCPtrMem += pSel->u64Base;
8743 }
8744
8745 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8746 return GCPtrMem;
8747 }
8748 /*
8749 * 16-bit and 32-bit segmentation.
8750 */
8751 else
8752 {
8753 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8754 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8755 == X86DESCATTR_P /* data, expand up */
8756 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8757 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8758 {
8759 /* expand up */
8760 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8761 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8762 && GCPtrLast32 > (uint32_t)GCPtrMem))
8763 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8764 }
8765 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8766 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8767 {
8768 /* expand down */
8769 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8770 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8771 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8772 && GCPtrLast32 > (uint32_t)GCPtrMem))
8773 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8774 }
8775 else
8776 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8777 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8778 }
8779 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8780}
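
/*
 * Worked example, for illustration only (not part of the original source):
 * an expand-down data segment with u32Limit = 0x0fff and D/B = 1 allows
 * offsets 0x1000..0xffffffff.  A 4 byte read at GCPtrMem = 0x2000 gives
 * GCPtrLast32 = 0x2004, so 0x2000 > 0x0fff, 0x2004 <= 0xffffffff and
 * 0x2004 > 0x2000 all hold and the access passes; a read at 0x0ffc fails
 * the first check and ends up in iemRaiseSelectorBoundsJmp instead.
 */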
8781
8782
8783IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8784{
8785 Assert(cbMem >= 1);
8786 Assert(iSegReg < X86_SREG_COUNT);
8787
8788 /*
8789 * 64-bit mode is simpler.
8790 */
8791 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8792 {
8793 if (iSegReg >= X86_SREG_FS)
8794 {
8795 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8796 GCPtrMem += pSel->u64Base;
8797 }
8798
8799 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8800 return GCPtrMem;
8801 }
8802 /*
8803 * 16-bit and 32-bit segmentation.
8804 */
8805 else
8806 {
8807 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8808 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8809 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8810 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8811 {
8812 /* expand up */
8813 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8814 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8815 && GCPtrLast32 > (uint32_t)GCPtrMem))
8816 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8817 }
8818 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8819 {
8820 /* expand down */
8821 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8822 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8823 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8824 && GCPtrLast32 > (uint32_t)GCPtrMem))
8825 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8826 }
8827 else
8828 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8829 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8830 }
8831 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8832}
8833
8834
8835/**
8836 * Fetches a data dword, longjmp on error, fallback/safe version.
8837 *
8838 * @returns The dword.
8839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8840 * @param iSegReg The index of the segment register to use for
8841 * this access. The base and limits are checked.
8842 * @param GCPtrMem The address of the guest memory.
8843 */
8844IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8845{
8846 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8847 uint32_t const u32Ret = *pu32Src;
8848 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8849 return u32Ret;
8850}
8851
8852
8853/**
8854 * Fetches a data dword, longjmp on error.
8855 *
8856 * @returns The dword.
8857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8858 * @param iSegReg The index of the segment register to use for
8859 * this access. The base and limits are checked.
8860 * @param GCPtrMem The address of the guest memory.
8861 */
8862DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8863{
8864# ifdef IEM_WITH_DATA_TLB
8865 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8866 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8867 {
8868 /// @todo more later.
8869 }
8870
8871 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8872# else
8873 /* The lazy approach. */
8874 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8875 uint32_t const u32Ret = *pu32Src;
8876 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8877 return u32Ret;
8878# endif
8879}
8880#endif
8881
8882
8883#ifdef SOME_UNUSED_FUNCTION
8884/**
8885 * Fetches a data dword and sign extends it to a qword.
8886 *
8887 * @returns Strict VBox status code.
8888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8889 * @param pu64Dst Where to return the sign extended value.
8890 * @param iSegReg The index of the segment register to use for
8891 * this access. The base and limits are checked.
8892 * @param GCPtrMem The address of the guest memory.
8893 */
8894IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8895{
8896 /* The lazy approach for now... */
8897 int32_t const *pi32Src;
8898 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8899 if (rc == VINF_SUCCESS)
8900 {
8901 *pu64Dst = *pi32Src;
8902 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8903 }
8904#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8905 else
8906 *pu64Dst = 0;
8907#endif
8908 return rc;
8909}
8910#endif
8911
8912
8913/**
8914 * Fetches a data qword.
8915 *
8916 * @returns Strict VBox status code.
8917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8918 * @param pu64Dst Where to return the qword.
8919 * @param iSegReg The index of the segment register to use for
8920 * this access. The base and limits are checked.
8921 * @param GCPtrMem The address of the guest memory.
8922 */
8923IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8924{
8925 /* The lazy approach for now... */
8926 uint64_t const *pu64Src;
8927 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8928 if (rc == VINF_SUCCESS)
8929 {
8930 *pu64Dst = *pu64Src;
8931 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8932 }
8933 return rc;
8934}
8935
8936
8937#ifdef IEM_WITH_SETJMP
8938/**
8939 * Fetches a data qword, longjmp on error.
8940 *
8941 * @returns The qword.
8942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8943 * @param iSegReg The index of the segment register to use for
8944 * this access. The base and limits are checked.
8945 * @param GCPtrMem The address of the guest memory.
8946 */
8947DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8948{
8949 /* The lazy approach for now... */
8950 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8951 uint64_t const u64Ret = *pu64Src;
8952 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8953 return u64Ret;
8954}
8955#endif
8956
8957
8958/**
8959 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8960 *
8961 * @returns Strict VBox status code.
8962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8963 * @param pu64Dst Where to return the qword.
8964 * @param iSegReg The index of the segment register to use for
8965 * this access. The base and limits are checked.
8966 * @param GCPtrMem The address of the guest memory.
8967 */
8968IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8969{
8970 /* The lazy approach for now... */
8971 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8972 if (RT_UNLIKELY(GCPtrMem & 15))
8973 return iemRaiseGeneralProtectionFault0(pVCpu);
8974
8975 uint64_t const *pu64Src;
8976 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8977 if (rc == VINF_SUCCESS)
8978 {
8979 *pu64Dst = *pu64Src;
8980 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8981 }
8982 return rc;
8983}
8984
8985
8986#ifdef IEM_WITH_SETJMP
8987/**
8988 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
8989 *
8990 * @returns The qword.
8991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8992 * @param iSegReg The index of the segment register to use for
8993 * this access. The base and limits are checked.
8994 * @param GCPtrMem The address of the guest memory.
8995 */
8996DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8997{
8998 /* The lazy approach for now... */
8999 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9000 if (RT_LIKELY(!(GCPtrMem & 15)))
9001 {
9002 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9003 uint64_t const u64Ret = *pu64Src;
9004 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9005 return u64Ret;
9006 }
9007
9008 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9009 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9010}
9011#endif
9012
9013
9014/**
9015 * Fetches a data tword.
9016 *
9017 * @returns Strict VBox status code.
9018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9019 * @param pr80Dst Where to return the tword.
9020 * @param iSegReg The index of the segment register to use for
9021 * this access. The base and limits are checked.
9022 * @param GCPtrMem The address of the guest memory.
9023 */
9024IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9025{
9026 /* The lazy approach for now... */
9027 PCRTFLOAT80U pr80Src;
9028 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9029 if (rc == VINF_SUCCESS)
9030 {
9031 *pr80Dst = *pr80Src;
9032 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9033 }
9034 return rc;
9035}
9036
9037
9038#ifdef IEM_WITH_SETJMP
9039/**
9040 * Fetches a data tword, longjmp on error.
9041 *
9042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9043 * @param pr80Dst Where to return the tword.
9044 * @param iSegReg The index of the segment register to use for
9045 * this access. The base and limits are checked.
9046 * @param GCPtrMem The address of the guest memory.
9047 */
9048DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9049{
9050 /* The lazy approach for now... */
9051 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9052 *pr80Dst = *pr80Src;
9053 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9054}
9055#endif
9056
9057
9058/**
9059 * Fetches a data dqword (double qword), generally SSE related.
9060 *
9061 * @returns Strict VBox status code.
9062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9063 * @param pu128Dst Where to return the dqword.
9064 * @param iSegReg The index of the segment register to use for
9065 * this access. The base and limits are checked.
9066 * @param GCPtrMem The address of the guest memory.
9067 */
9068IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9069{
9070 /* The lazy approach for now... */
9071 uint128_t const *pu128Src;
9072 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9073 if (rc == VINF_SUCCESS)
9074 {
9075 *pu128Dst = *pu128Src;
9076 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9077 }
9078 return rc;
9079}
9080
9081
9082#ifdef IEM_WITH_SETJMP
9083/**
9084 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9085 *
9086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9087 * @param pu128Dst Where to return the dqword.
9088 * @param iSegReg The index of the segment register to use for
9089 * this access. The base and limits are checked.
9090 * @param GCPtrMem The address of the guest memory.
9091 */
9092IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9093{
9094 /* The lazy approach for now... */
9095 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9096 *pu128Dst = *pu128Src;
9097 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9098}
9099#endif
9100
9101
9102/**
9103 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9104 * related.
9105 *
9106 * Raises \#GP(0) if not aligned.
9107 *
9108 * @returns Strict VBox status code.
9109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9110 * @param pu128Dst Where to return the dqword.
9111 * @param iSegReg The index of the segment register to use for
9112 * this access. The base and limits are checked.
9113 * @param GCPtrMem The address of the guest memory.
9114 */
9115IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9116{
9117 /* The lazy approach for now... */
9118 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9119 if ( (GCPtrMem & 15)
9120 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9121 return iemRaiseGeneralProtectionFault0(pVCpu);
9122
9123 uint128_t const *pu128Src;
9124 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9125 if (rc == VINF_SUCCESS)
9126 {
9127 *pu128Dst = *pu128Src;
9128 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9129 }
9130 return rc;
9131}
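
/*
 * Illustrative example (assumption, not original source text): an aligned
 * SSE load from GCPtrMem = 0x1008 has (GCPtrMem & 15) = 8 and thus raises
 * #GP(0), unless the guest has set MXCSR.MM (the AMD misaligned SSE
 * exception mask), which the check above honours.
 */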
9132
9133
9134#ifdef IEM_WITH_SETJMP
9135/**
9136 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9137 * related, longjmp on error.
9138 *
9139 * Raises \#GP(0) if not aligned.
9140 *
9141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9142 * @param pu128Dst Where to return the dqword.
9143 * @param iSegReg The index of the segment register to use for
9144 * this access. The base and limits are checked.
9145 * @param GCPtrMem The address of the guest memory.
9146 */
9147DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9148{
9149 /* The lazy approach for now... */
9150 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9151 if ( (GCPtrMem & 15) == 0
9152 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9153 {
9154 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9155 IEM_ACCESS_DATA_R);
9156 *pu128Dst = *pu128Src;
9157 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9158 return;
9159 }
9160
9161 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9162 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9163}
9164#endif
9165
9166
9167
9168/**
9169 * Fetches a descriptor register (lgdt, lidt).
9170 *
9171 * @returns Strict VBox status code.
9172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9173 * @param pcbLimit Where to return the limit.
9174 * @param pGCPtrBase Where to return the base.
9175 * @param iSegReg The index of the segment register to use for
9176 * this access. The base and limits are checked.
9177 * @param GCPtrMem The address of the guest memory.
9178 * @param enmOpSize The effective operand size.
9179 */
9180IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9181 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9182{
9183 /*
9184 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9185 * little special:
9186 * - The two reads are done separately.
9187 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9188 * - We suspect the 386 to actually commit the limit before the base in
9189 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9190 * don't try emulate this eccentric behavior, because it's not well
9191 * don't try to emulate this eccentric behavior, because it's not well
9192 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9193 */
9194 VBOXSTRICTRC rcStrict;
9195 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9196 {
9197 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9198 if (rcStrict == VINF_SUCCESS)
9199 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9200 }
9201 else
9202 {
9203 uint32_t uTmp = 0; /* (silences a Visual C++ 'potentially uninitialized' warning) */
9204 if (enmOpSize == IEMMODE_32BIT)
9205 {
9206 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9207 {
9208 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9209 if (rcStrict == VINF_SUCCESS)
9210 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9211 }
9212 else
9213 {
9214 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9215 if (rcStrict == VINF_SUCCESS)
9216 {
9217 *pcbLimit = (uint16_t)uTmp;
9218 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9219 }
9220 }
9221 if (rcStrict == VINF_SUCCESS)
9222 *pGCPtrBase = uTmp;
9223 }
9224 else
9225 {
9226 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9227 if (rcStrict == VINF_SUCCESS)
9228 {
9229 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9230 if (rcStrict == VINF_SUCCESS)
9231 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9232 }
9233 }
9234 }
9235 return rcStrict;
9236}
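
/*
 * Illustrative layout of the pseudo descriptor read above (not part of the
 * original source):
 *     offset 0..1 : limit, always 16 bits.
 *     offset 2..  : base  - low 24 bits with a 16-bit operand size,
 *                           32 bits with a 32-bit operand size, and the
 *                           full 64 bits in 64-bit mode.
 */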
9237
9238
9239
9240/**
9241 * Stores a data byte.
9242 *
9243 * @returns Strict VBox status code.
9244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9245 * @param iSegReg The index of the segment register to use for
9246 * this access. The base and limits are checked.
9247 * @param GCPtrMem The address of the guest memory.
9248 * @param u8Value The value to store.
9249 */
9250IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9251{
9252 /* The lazy approach for now... */
9253 uint8_t *pu8Dst;
9254 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9255 if (rc == VINF_SUCCESS)
9256 {
9257 *pu8Dst = u8Value;
9258 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9259 }
9260 return rc;
9261}
9262
9263
9264#ifdef IEM_WITH_SETJMP
9265/**
9266 * Stores a data byte, longjmp on error.
9267 *
9268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9269 * @param iSegReg The index of the segment register to use for
9270 * this access. The base and limits are checked.
9271 * @param GCPtrMem The address of the guest memory.
9272 * @param u8Value The value to store.
9273 */
9274IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9275{
9276 /* The lazy approach for now... */
9277 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9278 *pu8Dst = u8Value;
9279 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9280}
9281#endif
9282
9283
9284/**
9285 * Stores a data word.
9286 *
9287 * @returns Strict VBox status code.
9288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9289 * @param iSegReg The index of the segment register to use for
9290 * this access. The base and limits are checked.
9291 * @param GCPtrMem The address of the guest memory.
9292 * @param u16Value The value to store.
9293 */
9294IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9295{
9296 /* The lazy approach for now... */
9297 uint16_t *pu16Dst;
9298 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9299 if (rc == VINF_SUCCESS)
9300 {
9301 *pu16Dst = u16Value;
9302 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9303 }
9304 return rc;
9305}
9306
9307
9308#ifdef IEM_WITH_SETJMP
9309/**
9310 * Stores a data word, longjmp on error.
9311 *
9312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9313 * @param iSegReg The index of the segment register to use for
9314 * this access. The base and limits are checked.
9315 * @param GCPtrMem The address of the guest memory.
9316 * @param u16Value The value to store.
9317 */
9318IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9319{
9320 /* The lazy approach for now... */
9321 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9322 *pu16Dst = u16Value;
9323 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9324}
9325#endif
9326
9327
9328/**
9329 * Stores a data dword.
9330 *
9331 * @returns Strict VBox status code.
9332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9333 * @param iSegReg The index of the segment register to use for
9334 * this access. The base and limits are checked.
9335 * @param GCPtrMem The address of the guest memory.
9336 * @param u32Value The value to store.
9337 */
9338IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9339{
9340 /* The lazy approach for now... */
9341 uint32_t *pu32Dst;
9342 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9343 if (rc == VINF_SUCCESS)
9344 {
9345 *pu32Dst = u32Value;
9346 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9347 }
9348 return rc;
9349}
9350
9351
9352#ifdef IEM_WITH_SETJMP
9353/**
9354 * Stores a data dword, longjmp on error.
9355 *
9357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9358 * @param iSegReg The index of the segment register to use for
9359 * this access. The base and limits are checked.
9360 * @param GCPtrMem The address of the guest memory.
9361 * @param u32Value The value to store.
9362 */
9363IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9364{
9365 /* The lazy approach for now... */
9366 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9367 *pu32Dst = u32Value;
9368 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9369}
9370#endif
9371
9372
9373/**
9374 * Stores a data qword.
9375 *
9376 * @returns Strict VBox status code.
9377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9378 * @param iSegReg The index of the segment register to use for
9379 * this access. The base and limits are checked.
9380 * @param GCPtrMem The address of the guest memory.
9381 * @param u64Value The value to store.
9382 */
9383IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9384{
9385 /* The lazy approach for now... */
9386 uint64_t *pu64Dst;
9387 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9388 if (rc == VINF_SUCCESS)
9389 {
9390 *pu64Dst = u64Value;
9391 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9392 }
9393 return rc;
9394}
9395
9396
9397#ifdef IEM_WITH_SETJMP
9398/**
9399 * Stores a data qword, longjmp on error.
9400 *
9401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9402 * @param iSegReg The index of the segment register to use for
9403 * this access. The base and limits are checked.
9404 * @param GCPtrMem The address of the guest memory.
9405 * @param u64Value The value to store.
9406 */
9407IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9408{
9409 /* The lazy approach for now... */
9410 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9411 *pu64Dst = u64Value;
9412 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9413}
9414#endif
9415
9416
9417/**
9418 * Stores a data dqword.
9419 *
9420 * @returns Strict VBox status code.
9421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9422 * @param iSegReg The index of the segment register to use for
9423 * this access. The base and limits are checked.
9424 * @param GCPtrMem The address of the guest memory.
9425 * @param u128Value The value to store.
9426 */
9427IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9428{
9429 /* The lazy approach for now... */
9430 uint128_t *pu128Dst;
9431 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9432 if (rc == VINF_SUCCESS)
9433 {
9434 *pu128Dst = u128Value;
9435 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9436 }
9437 return rc;
9438}
9439
9440
9441#ifdef IEM_WITH_SETJMP
9442/**
9443 * Stores a data dqword, longjmp on error.
9444 *
9445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9446 * @param iSegReg The index of the segment register to use for
9447 * this access. The base and limits are checked.
9448 * @param GCPtrMem The address of the guest memory.
9449 * @param u128Value The value to store.
9450 */
9451IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9452{
9453 /* The lazy approach for now... */
9454 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9455 *pu128Dst = u128Value;
9456 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9457}
9458#endif
9459
9460
9461/**
9462 * Stores a data dqword, SSE aligned.
9463 *
9464 * @returns Strict VBox status code.
9465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9466 * @param iSegReg The index of the segment register to use for
9467 * this access. The base and limits are checked.
9468 * @param GCPtrMem The address of the guest memory.
9469 * @param u128Value The value to store.
9470 */
9471IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9472{
9473 /* The lazy approach for now... */
9474 if ( (GCPtrMem & 15)
9475 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9476 return iemRaiseGeneralProtectionFault0(pVCpu);
9477
9478 uint128_t *pu128Dst;
9479 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9480 if (rc == VINF_SUCCESS)
9481 {
9482 *pu128Dst = u128Value;
9483 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9484 }
9485 return rc;
9486}
9487
9488
9489#ifdef IEM_WITH_SETJMP
9490/**
9491 * Stores a data dqword, SSE aligned, longjmp on error.
9492 *
9493 * @returns Strict VBox status code.
9494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9495 * @param iSegReg The index of the segment register to use for
9496 * this access. The base and limits are checked.
9497 * @param GCPtrMem The address of the guest memory.
9498 * @param u128Value The value to store.
9499 */
9500DECL_NO_INLINE(IEM_STATIC, void)
9501iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9502{
9503 /* The lazy approach for now... */
9504 if ( (GCPtrMem & 15) == 0
9505 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9506 {
9507 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9508 *pu128Dst = u128Value;
9509 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9510 return;
9511 }
9512
9513 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9514 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9515}
9516#endif
9517
9518
9519/**
9520 * Stores a descriptor register (sgdt, sidt).
9521 *
9522 * @returns Strict VBox status code.
9523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9524 * @param cbLimit The limit.
9525 * @param GCPtrBase The base address.
9526 * @param iSegReg The index of the segment register to use for
9527 * this access. The base and limits are checked.
9528 * @param GCPtrMem The address of the guest memory.
9529 */
9530IEM_STATIC VBOXSTRICTRC
9531iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9532{
9533 /*
9534 * The SIDT and SGDT instructions actually store the data using two
9535 * independent writes. The instructions do not respond to operand-size prefixes.
9536 */
9537 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9538 if (rcStrict == VINF_SUCCESS)
9539 {
9540 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9541 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9542 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9543 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9544 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9545 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9546 else
9547 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9548 }
9549 return rcStrict;
9550}
9551
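/*
 * Hedged usage sketch (hypothetical caller, not taken from this file): an SGDT
 * implementation could forward the guest GDTR through this helper roughly as
 *
 *     return iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt,
 *                                iEffSeg, GCPtrEffDst);
 *
 * where iEffSeg and GCPtrEffDst are assumed to name the decoded memory operand.
 */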
9552
9553/**
9554 * Pushes a word onto the stack.
9555 *
9556 * @returns Strict VBox status code.
9557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9558 * @param u16Value The value to push.
9559 */
9560IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9561{
9562    /* Decrement the stack pointer. */
9563 uint64_t uNewRsp;
9564 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9565 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9566
9567 /* Write the word the lazy way. */
9568 uint16_t *pu16Dst;
9569 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9570 if (rc == VINF_SUCCESS)
9571 {
9572 *pu16Dst = u16Value;
9573 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9574 }
9575
9576    /* Commit the new RSP value unless an access handler made trouble. */
9577 if (rc == VINF_SUCCESS)
9578 pCtx->rsp = uNewRsp;
9579
9580 return rc;
9581}
9582
9583
9584/**
9585 * Pushes a dword onto the stack.
9586 *
9587 * @returns Strict VBox status code.
9588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9589 * @param u32Value The value to push.
9590 */
9591IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9592{
9593    /* Decrement the stack pointer. */
9594 uint64_t uNewRsp;
9595 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9596 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9597
9598 /* Write the dword the lazy way. */
9599 uint32_t *pu32Dst;
9600 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9601 if (rc == VINF_SUCCESS)
9602 {
9603 *pu32Dst = u32Value;
9604 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9605 }
9606
9607    /* Commit the new RSP value unless an access handler made trouble. */
9608 if (rc == VINF_SUCCESS)
9609 pCtx->rsp = uNewRsp;
9610
9611 return rc;
9612}
9613
9614
9615/**
9616 * Pushes a dword segment register value onto the stack.
9617 *
9618 * @returns Strict VBox status code.
9619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9620 * @param u32Value The value to push.
9621 */
9622IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9623{
9624    /* Decrement the stack pointer. */
9625 uint64_t uNewRsp;
9626 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9627 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9628
9629 VBOXSTRICTRC rc;
9630 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9631 {
9632 /* The recompiler writes a full dword. */
9633 uint32_t *pu32Dst;
9634 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9635 if (rc == VINF_SUCCESS)
9636 {
9637 *pu32Dst = u32Value;
9638 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9639 }
9640 }
9641 else
9642 {
9643        /* The Intel docs talk about zero extending the selector register
9644           value. The actual Intel CPU tested here might be zero extending the
9645           value, but it still only writes the lower word... */
9646        /** @todo Test this on new HW, on AMD, and in 64-bit mode. Also test what
9647         * happens when crossing a page boundary: is the high word checked for
9648         * write accessibility or not? Probably it is. What about segment limits?
9649         * It appears this behavior is also shared with trap error codes.
9650         *
9651         * Docs indicate the behavior may have changed with the Pentium or Pentium
9652         * Pro; check ancient hardware to determine when it actually changed. */
9653 uint16_t *pu16Dst;
9654 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9655 if (rc == VINF_SUCCESS)
9656 {
9657 *pu16Dst = (uint16_t)u32Value;
9658 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9659 }
9660 }
9661
9662    /* Commit the new RSP value unless an access handler made trouble. */
9663 if (rc == VINF_SUCCESS)
9664 pCtx->rsp = uNewRsp;
9665
9666 return rc;
9667}
9668
9669
9670/**
9671 * Pushes a qword onto the stack.
9672 *
9673 * @returns Strict VBox status code.
9674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9675 * @param u64Value The value to push.
9676 */
9677IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9678{
9679    /* Decrement the stack pointer. */
9680 uint64_t uNewRsp;
9681 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9682 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9683
9684    /* Write the qword the lazy way. */
9685 uint64_t *pu64Dst;
9686 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9687 if (rc == VINF_SUCCESS)
9688 {
9689 *pu64Dst = u64Value;
9690 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9691 }
9692
9693    /* Commit the new RSP value unless an access handler made trouble. */
9694 if (rc == VINF_SUCCESS)
9695 pCtx->rsp = uNewRsp;
9696
9697 return rc;
9698}
9699
9700
9701/**
9702 * Pops a word from the stack.
9703 *
9704 * @returns Strict VBox status code.
9705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9706 * @param pu16Value Where to store the popped value.
9707 */
9708IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9709{
9710 /* Increment the stack pointer. */
9711 uint64_t uNewRsp;
9712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9713 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9714
9715    /* Read the word the lazy way. */
9716 uint16_t const *pu16Src;
9717 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9718 if (rc == VINF_SUCCESS)
9719 {
9720 *pu16Value = *pu16Src;
9721 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9722
9723 /* Commit the new RSP value. */
9724 if (rc == VINF_SUCCESS)
9725 pCtx->rsp = uNewRsp;
9726 }
9727
9728 return rc;
9729}
9730
9731
9732/**
9733 * Pops a dword from the stack.
9734 *
9735 * @returns Strict VBox status code.
9736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9737 * @param pu32Value Where to store the popped value.
9738 */
9739IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9740{
9741 /* Increment the stack pointer. */
9742 uint64_t uNewRsp;
9743 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9744 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9745
9746    /* Read the dword the lazy way. */
9747 uint32_t const *pu32Src;
9748 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9749 if (rc == VINF_SUCCESS)
9750 {
9751 *pu32Value = *pu32Src;
9752 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9753
9754 /* Commit the new RSP value. */
9755 if (rc == VINF_SUCCESS)
9756 pCtx->rsp = uNewRsp;
9757 }
9758
9759 return rc;
9760}
9761
9762
9763/**
9764 * Pops a qword from the stack.
9765 *
9766 * @returns Strict VBox status code.
9767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9768 * @param pu64Value Where to store the popped value.
9769 */
9770IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9771{
9772 /* Increment the stack pointer. */
9773 uint64_t uNewRsp;
9774 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9775 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9776
9777    /* Read the qword the lazy way. */
9778 uint64_t const *pu64Src;
9779 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9780 if (rc == VINF_SUCCESS)
9781 {
9782 *pu64Value = *pu64Src;
9783 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9784
9785 /* Commit the new RSP value. */
9786 if (rc == VINF_SUCCESS)
9787 pCtx->rsp = uNewRsp;
9788 }
9789
9790 return rc;
9791}
9792
9793
9794/**
9795 * Pushes a word onto the stack, using a temporary stack pointer.
9796 *
9797 * @returns Strict VBox status code.
9798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9799 * @param u16Value The value to push.
9800 * @param pTmpRsp Pointer to the temporary stack pointer.
9801 */
9802IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9803{
9804    /* Decrement the stack pointer. */
9805 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9806 RTUINT64U NewRsp = *pTmpRsp;
9807 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9808
9809 /* Write the word the lazy way. */
9810 uint16_t *pu16Dst;
9811 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9812 if (rc == VINF_SUCCESS)
9813 {
9814 *pu16Dst = u16Value;
9815 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9816 }
9817
9818    /* Commit the new RSP value unless an access handler made trouble. */
9819 if (rc == VINF_SUCCESS)
9820 *pTmpRsp = NewRsp;
9821
9822 return rc;
9823}
9824
9825
9826/**
9827 * Pushes a dword onto the stack, using a temporary stack pointer.
9828 *
9829 * @returns Strict VBox status code.
9830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9831 * @param u32Value The value to push.
9832 * @param pTmpRsp Pointer to the temporary stack pointer.
9833 */
9834IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9835{
9836    /* Decrement the stack pointer. */
9837 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9838 RTUINT64U NewRsp = *pTmpRsp;
9839 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9840
9841    /* Write the dword the lazy way. */
9842 uint32_t *pu32Dst;
9843 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9844 if (rc == VINF_SUCCESS)
9845 {
9846 *pu32Dst = u32Value;
9847 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9848 }
9849
9850    /* Commit the new RSP value unless an access handler made trouble. */
9851 if (rc == VINF_SUCCESS)
9852 *pTmpRsp = NewRsp;
9853
9854 return rc;
9855}
9856
9857
9858/**
9859 * Pushes a qword onto the stack, using a temporary stack pointer.
9860 *
9861 * @returns Strict VBox status code.
9862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9863 * @param u64Value The value to push.
9864 * @param pTmpRsp Pointer to the temporary stack pointer.
9865 */
9866IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9867{
9868    /* Decrement the stack pointer. */
9869 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9870 RTUINT64U NewRsp = *pTmpRsp;
9871 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9872
9873    /* Write the qword the lazy way. */
9874 uint64_t *pu64Dst;
9875 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9876 if (rc == VINF_SUCCESS)
9877 {
9878 *pu64Dst = u64Value;
9879 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9880 }
9881
9882    /* Commit the new RSP value unless an access handler made trouble. */
9883 if (rc == VINF_SUCCESS)
9884 *pTmpRsp = NewRsp;
9885
9886 return rc;
9887}
9888
9889
9890/**
9891 * Pops a word from the stack, using a temporary stack pointer.
9892 *
9893 * @returns Strict VBox status code.
9894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9895 * @param pu16Value Where to store the popped value.
9896 * @param pTmpRsp Pointer to the temporary stack pointer.
9897 */
9898IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9899{
9900 /* Increment the stack pointer. */
9901 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9902 RTUINT64U NewRsp = *pTmpRsp;
9903 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9904
9905    /* Read the word the lazy way. */
9906 uint16_t const *pu16Src;
9907 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9908 if (rc == VINF_SUCCESS)
9909 {
9910 *pu16Value = *pu16Src;
9911 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9912
9913 /* Commit the new RSP value. */
9914 if (rc == VINF_SUCCESS)
9915 *pTmpRsp = NewRsp;
9916 }
9917
9918 return rc;
9919}
9920
9921
9922/**
9923 * Pops a dword from the stack, using a temporary stack pointer.
9924 *
9925 * @returns Strict VBox status code.
9926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9927 * @param pu32Value Where to store the popped value.
9928 * @param pTmpRsp Pointer to the temporary stack pointer.
9929 */
9930IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9931{
9932 /* Increment the stack pointer. */
9933 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9934 RTUINT64U NewRsp = *pTmpRsp;
9935 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9936
9937    /* Read the dword the lazy way. */
9938 uint32_t const *pu32Src;
9939 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9940 if (rc == VINF_SUCCESS)
9941 {
9942 *pu32Value = *pu32Src;
9943 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9944
9945 /* Commit the new RSP value. */
9946 if (rc == VINF_SUCCESS)
9947 *pTmpRsp = NewRsp;
9948 }
9949
9950 return rc;
9951}
9952
9953
9954/**
9955 * Pops a qword from the stack, using a temporary stack pointer.
9956 *
9957 * @returns Strict VBox status code.
9958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9959 * @param pu64Value Where to store the popped value.
9960 * @param pTmpRsp Pointer to the temporary stack pointer.
9961 */
9962IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9963{
9964 /* Increment the stack pointer. */
9965 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9966 RTUINT64U NewRsp = *pTmpRsp;
9967 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9968
9969    /* Read the qword the lazy way. */
9970 uint64_t const *pu64Src;
9971 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9972 if (rcStrict == VINF_SUCCESS)
9973 {
9974 *pu64Value = *pu64Src;
9975 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9976
9977 /* Commit the new RSP value. */
9978 if (rcStrict == VINF_SUCCESS)
9979 *pTmpRsp = NewRsp;
9980 }
9981
9982 return rcStrict;
9983}
9984
9985
9986/**
9987 * Begin a special stack push (used by interrupts, exceptions and such).
9988 *
9989 * This will raise \#SS or \#PF if appropriate.
9990 *
9991 * @returns Strict VBox status code.
9992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9993 * @param cbMem The number of bytes to push onto the stack.
9994 * @param ppvMem Where to return the pointer to the stack memory.
9995 *                      As with the other memory functions, this may be
9996 *                      direct access or bounce-buffered access, so don't
9997 *                      commit the register until the commit call
9998 *                      succeeds.
9999 * @param puNewRsp Where to return the new RSP value. This must be
10000 * passed unchanged to
10001 * iemMemStackPushCommitSpecial().
10002 */
10003IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10004{
10005 Assert(cbMem < UINT8_MAX);
10006 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10007 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10008 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10009}
10010
10011
10012/**
10013 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10014 *
10015 * This will update the rSP.
10016 *
10017 * @returns Strict VBox status code.
10018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10019 * @param pvMem The pointer returned by
10020 * iemMemStackPushBeginSpecial().
10021 * @param uNewRsp The new RSP value returned by
10022 * iemMemStackPushBeginSpecial().
10023 */
10024IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10025{
10026 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10027 if (rcStrict == VINF_SUCCESS)
10028 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10029 return rcStrict;
10030}
10031
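/*
 * Hedged usage sketch (hypothetical caller, not taken from this file): pushing
 * a 32-bit error code with the special push helpers could look roughly like
 *
 *     uint64_t     uNewRsp;
 *     uint32_t    *pu32Dst;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint32_t),
 *                                                         (void **)&pu32Dst, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     *pu32Dst = uErrCode;   // uErrCode is assumed to be supplied by the caller
 *     return iemMemStackPushCommitSpecial(pVCpu, pu32Dst, uNewRsp);
 *
 * i.e. RSP is only committed once the (possibly bounce buffered) write succeeds.
 */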
10032
10033/**
10034 * Begin a special stack pop (used by iret, retf and such).
10035 *
10036 * This will raise \#SS or \#PF if appropriate.
10037 *
10038 * @returns Strict VBox status code.
10039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10040 * @param cbMem The number of bytes to pop from the stack.
10041 * @param ppvMem Where to return the pointer to the stack memory.
10042 * @param puNewRsp Where to return the new RSP value. This must be
10043 * assigned to CPUMCTX::rsp manually some time
10044 * after iemMemStackPopDoneSpecial() has been
10045 * called.
10046 */
10047IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10048{
10049 Assert(cbMem < UINT8_MAX);
10050 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10051 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10052 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10053}
10054
10055
10056/**
10057 * Continue a special stack pop (used by iret and retf).
10058 *
10059 * This will raise \#SS or \#PF if appropriate.
10060 *
10061 * @returns Strict VBox status code.
10062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10063 * @param cbMem The number of bytes to pop from the stack.
10064 * @param ppvMem Where to return the pointer to the stack memory.
10065 * @param puNewRsp Where to return the new RSP value. This must be
10066 * assigned to CPUMCTX::rsp manually some time
10067 * after iemMemStackPopDoneSpecial() has been
10068 * called.
10069 */
10070IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10071{
10072 Assert(cbMem < UINT8_MAX);
10073 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10074 RTUINT64U NewRsp;
10075 NewRsp.u = *puNewRsp;
10076 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10077 *puNewRsp = NewRsp.u;
10078 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10079}
10080
10081
10082/**
10083 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10084 * iemMemStackPopContinueSpecial).
10085 *
10086 * The caller will manually commit the rSP.
10087 *
10088 * @returns Strict VBox status code.
10089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10090 * @param pvMem The pointer returned by
10091 * iemMemStackPopBeginSpecial() or
10092 * iemMemStackPopContinueSpecial().
10093 */
10094IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10095{
10096 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10097}
10098
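/*
 * Hedged usage sketch (hypothetical caller, not taken from this file): the
 * special pop helpers are meant to be paired roughly like
 *
 *     uint64_t        uNewRsp;
 *     uint32_t const *pu32Src;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint32_t),
 *                                                        (void const **)&pu32Src, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint32_t const uValue = *pu32Src;       // copy out before unmapping
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu32Src);
 *     if (rcStrict == VINF_SUCCESS)
 *         IEM_GET_CTX(pVCpu)->rsp = uNewRsp;  // the caller commits RSP itself
 *     return rcStrict;
 */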
10099
10100/**
10101 * Fetches a system table byte.
10102 *
10103 * @returns Strict VBox status code.
10104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10105 * @param pbDst Where to return the byte.
10106 * @param iSegReg The index of the segment register to use for
10107 * this access. The base and limits are checked.
10108 * @param GCPtrMem The address of the guest memory.
10109 */
10110IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10111{
10112 /* The lazy approach for now... */
10113 uint8_t const *pbSrc;
10114 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10115 if (rc == VINF_SUCCESS)
10116 {
10117 *pbDst = *pbSrc;
10118 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10119 }
10120 return rc;
10121}
10122
10123
10124/**
10125 * Fetches a system table word.
10126 *
10127 * @returns Strict VBox status code.
10128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10129 * @param pu16Dst Where to return the word.
10130 * @param iSegReg The index of the segment register to use for
10131 * this access. The base and limits are checked.
10132 * @param GCPtrMem The address of the guest memory.
10133 */
10134IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10135{
10136 /* The lazy approach for now... */
10137 uint16_t const *pu16Src;
10138 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10139 if (rc == VINF_SUCCESS)
10140 {
10141 *pu16Dst = *pu16Src;
10142 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10143 }
10144 return rc;
10145}
10146
10147
10148/**
10149 * Fetches a system table dword.
10150 *
10151 * @returns Strict VBox status code.
10152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10153 * @param pu32Dst Where to return the dword.
10154 * @param iSegReg The index of the segment register to use for
10155 * this access. The base and limits are checked.
10156 * @param GCPtrMem The address of the guest memory.
10157 */
10158IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10159{
10160 /* The lazy approach for now... */
10161 uint32_t const *pu32Src;
10162 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10163 if (rc == VINF_SUCCESS)
10164 {
10165 *pu32Dst = *pu32Src;
10166 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10167 }
10168 return rc;
10169}
10170
10171
10172/**
10173 * Fetches a system table qword.
10174 *
10175 * @returns Strict VBox status code.
10176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10177 * @param pu64Dst Where to return the qword.
10178 * @param iSegReg The index of the segment register to use for
10179 * this access. The base and limits are checked.
10180 * @param GCPtrMem The address of the guest memory.
10181 */
10182IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10183{
10184 /* The lazy approach for now... */
10185 uint64_t const *pu64Src;
10186 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10187 if (rc == VINF_SUCCESS)
10188 {
10189 *pu64Dst = *pu64Src;
10190 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10191 }
10192 return rc;
10193}
10194
10195
10196/**
10197 * Fetches a descriptor table entry with caller specified error code.
10198 *
10199 * @returns Strict VBox status code.
10200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10201 * @param pDesc Where to return the descriptor table entry.
10202 * @param uSel The selector which table entry to fetch.
10203 * @param uXcpt The exception to raise on table lookup error.
10204 * @param uErrorCode The error code associated with the exception.
10205 */
10206IEM_STATIC VBOXSTRICTRC
10207iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10208{
10209 AssertPtr(pDesc);
10210 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10211
10212 /** @todo did the 286 require all 8 bytes to be accessible? */
10213 /*
10214 * Get the selector table base and check bounds.
10215 */
10216 RTGCPTR GCPtrBase;
10217 if (uSel & X86_SEL_LDT)
10218 {
10219 if ( !pCtx->ldtr.Attr.n.u1Present
10220 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10221 {
10222 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10223 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10224 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10225 uErrorCode, 0);
10226 }
10227
10228 Assert(pCtx->ldtr.Attr.n.u1Present);
10229 GCPtrBase = pCtx->ldtr.u64Base;
10230 }
10231 else
10232 {
10233 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10234 {
10235 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10236 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10237 uErrorCode, 0);
10238 }
10239 GCPtrBase = pCtx->gdtr.pGdt;
10240 }
10241
10242 /*
10243 * Read the legacy descriptor and maybe the long mode extensions if
10244 * required.
10245 */
10246 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10247 if (rcStrict == VINF_SUCCESS)
10248 {
10249 if ( !IEM_IS_LONG_MODE(pVCpu)
10250 || pDesc->Legacy.Gen.u1DescType)
10251 pDesc->Long.au64[1] = 0;
10252 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10253 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10254 else
10255 {
10256 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10257 /** @todo is this the right exception? */
10258 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10259 }
10260 }
10261 return rcStrict;
10262}
10263
10264
10265/**
10266 * Fetches a descriptor table entry.
10267 *
10268 * @returns Strict VBox status code.
10269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10270 * @param pDesc Where to return the descriptor table entry.
10271 * @param uSel The selector which table entry to fetch.
10272 * @param uXcpt The exception to raise on table lookup error.
10273 */
10274IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10275{
10276 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10277}
10278
10279
10280/**
10281 * Fakes a long mode stack selector for SS = 0.
10282 *
10283 * @param pDescSs Where to return the fake stack descriptor.
10284 * @param uDpl The DPL we want.
10285 */
10286IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10287{
10288 pDescSs->Long.au64[0] = 0;
10289 pDescSs->Long.au64[1] = 0;
10290 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10291 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10292 pDescSs->Long.Gen.u2Dpl = uDpl;
10293 pDescSs->Long.Gen.u1Present = 1;
10294 pDescSs->Long.Gen.u1Long = 1;
10295}
10296
10297
10298/**
10299 * Marks the selector descriptor as accessed (only non-system descriptors).
10300 *
10301 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10302 * will therefore skip the limit checks.
10303 *
10304 * @returns Strict VBox status code.
10305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10306 * @param uSel The selector.
10307 */
10308IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10309{
10310 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10311
10312 /*
10313 * Get the selector table base and calculate the entry address.
10314 */
10315 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10316 ? pCtx->ldtr.u64Base
10317 : pCtx->gdtr.pGdt;
10318 GCPtr += uSel & X86_SEL_MASK;
10319
10320 /*
10321 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10322 * ugly stuff to avoid this. This will make sure it's an atomic access
10323 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10324 */
10325 VBOXSTRICTRC rcStrict;
10326 uint32_t volatile *pu32;
10327 if ((GCPtr & 3) == 0)
10328 {
10329        /* The normal case: map the 32 bits around the accessed bit (bit 40). */
10330 GCPtr += 2 + 2;
10331 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10332 if (rcStrict != VINF_SUCCESS)
10333 return rcStrict;
10334        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10335 }
10336 else
10337 {
10338 /* The misaligned GDT/LDT case, map the whole thing. */
10339 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10340 if (rcStrict != VINF_SUCCESS)
10341 return rcStrict;
10342 switch ((uintptr_t)pu32 & 3)
10343 {
10344 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10345 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10346 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10347 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10348 }
10349 }
10350
10351 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10352}
10353
10354/** @} */
10355
10356
10357/*
10358 * Include the C/C++ implementation of instruction.
10359 */
10360#include "IEMAllCImpl.cpp.h"
10361
10362
10363
10364/** @name "Microcode" macros.
10365 *
10366 * The idea is that we should be able to use the same code both to interpret
10367 * instructions and, eventually, to drive a recompiler. Thus this obfuscation.
10368 *
10369 * @{
10370 */
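/*
 * Illustrative sketch (not a real decoder entry): with the definitions below,
 * an instruction body that merely advances the instruction pointer could be
 * written as
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * which in the interpreter expands to a plain C block that calls
 * iemRegUpdateRipAndClearRF(pVCpu); a recompiler could expand the very same
 * macros into code generation instead.
 */
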
10371#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10372#define IEM_MC_END() }
10373#define IEM_MC_PAUSE() do {} while (0)
10374#define IEM_MC_CONTINUE() do {} while (0)
10375
10376/** Internal macro. */
10377#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10378 do \
10379 { \
10380 VBOXSTRICTRC rcStrict2 = a_Expr; \
10381 if (rcStrict2 != VINF_SUCCESS) \
10382 return rcStrict2; \
10383 } while (0)
10384
10385
10386#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10387#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10388#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10389#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10390#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10391#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10392#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10393#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10394#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10395 do { \
10396 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10397 return iemRaiseDeviceNotAvailable(pVCpu); \
10398 } while (0)
10399#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10400 do { \
10401 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10402 return iemRaiseMathFault(pVCpu); \
10403 } while (0)
10404#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10405 do { \
10406 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10407 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10408 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10409 return iemRaiseUndefinedOpcode(pVCpu); \
10410 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10411 return iemRaiseDeviceNotAvailable(pVCpu); \
10412 } while (0)
10413#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10414 do { \
10415 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10416 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10417 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10418 return iemRaiseUndefinedOpcode(pVCpu); \
10419 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10420 return iemRaiseDeviceNotAvailable(pVCpu); \
10421 } while (0)
10422#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10423 do { \
10424 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10425 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10426 return iemRaiseUndefinedOpcode(pVCpu); \
10427 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10428 return iemRaiseDeviceNotAvailable(pVCpu); \
10429 } while (0)
10430#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10431 do { \
10432 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10433 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10434 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10435 return iemRaiseUndefinedOpcode(pVCpu); \
10436 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10437 return iemRaiseDeviceNotAvailable(pVCpu); \
10438 } while (0)
10439#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10440 do { \
10441 if (pVCpu->iem.s.uCpl != 0) \
10442 return iemRaiseGeneralProtectionFault0(pVCpu); \
10443 } while (0)
10444
10445
10446#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10447#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10448#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10449#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10450#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10451#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10452#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10453 uint32_t a_Name; \
10454 uint32_t *a_pName = &a_Name
10455#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10456 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10457
10458#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10459#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10460
10461#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10462#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10463#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10464#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10465#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10466#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10467#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10468#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10469#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10470#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10471#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10472#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10473#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10474#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10475#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10476#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10477#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10478#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10479#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10480#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10481#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10482#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10483#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10484#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10485#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10486#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10487#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10488#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10489#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10490/** @note Not for IOPL or IF testing or modification. */
10491#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10492#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10493#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10494#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10495
10496#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10497#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10498#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10499#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10500#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10501#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10502#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10503#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10504#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10505#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10506#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10507 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10508
10509#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10510#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10511/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10512 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10513#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10514#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10515/** @note Not for IOPL or IF testing or modification. */
10516#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10517
10518#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10519#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10520#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10521 do { \
10522 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10523 *pu32Reg += (a_u32Value); \
10524        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10525 } while (0)
10526#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10527
10528#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10529#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10530#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10531 do { \
10532 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10533 *pu32Reg -= (a_u32Value); \
10534        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10535 } while (0)
10536#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10537#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10538
10539#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10540#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10541#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10542#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10543#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10544#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10545#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10546
10547#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10548#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10549#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10550#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10551
10552#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10553#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10554#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10555
10556#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10557#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10558#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10559
10560#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10561#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10562#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10563
10564#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10565#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10566#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10567
10568#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10569
10570#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10571
10572#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10573#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10574#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10575 do { \
10576 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10577 *pu32Reg &= (a_u32Value); \
10578        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10579 } while (0)
10580#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10581
10582#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10583#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10584#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10585 do { \
10586 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10587 *pu32Reg |= (a_u32Value); \
10588        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10589 } while (0)
10590#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10591
10592
10593/** @note Not for IOPL or IF modification. */
10594#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10595/** @note Not for IOPL or IF modification. */
10596#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10597/** @note Not for IOPL or IF modification. */
10598#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10599
10600#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10601
10602
10603#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10604 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10605#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10606 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10607#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10608 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10609#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10610 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10611#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10612 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10613#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10614 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10615#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10616 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10617
10618#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10619 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10620#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10621 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10622#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10623 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10624#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10625 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10626#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10627 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10628#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10629 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10630 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10631 } while (0)
10632#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10633 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10634 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10635 } while (0)
10636#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10637 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10638#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10639 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10640#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10641 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10642#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10643 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10644 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10645
10646#ifndef IEM_WITH_SETJMP
10647# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10649# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10650 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10651# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10652 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10653#else
10654# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10655 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10656# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10657 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10658# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10659 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10660#endif
10661
10662#ifndef IEM_WITH_SETJMP
10663# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10664 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10665# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10666 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10667# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10669#else
10670# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10671 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10672# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10673 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10674# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10675 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10676#endif
10677
10678#ifndef IEM_WITH_SETJMP
10679# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10680 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10681# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10683# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10685#else
10686# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10687 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10688# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10689 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10690# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10691 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10692#endif
10693
10694#ifdef SOME_UNUSED_FUNCTION
10695# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10696 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10697#endif
10698
10699#ifndef IEM_WITH_SETJMP
10700# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10702# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10703 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10704# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10706# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10707 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10708#else
10709# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10710 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10711# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10712 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10713# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10714 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10715# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10716 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10717#endif
10718
10719#ifndef IEM_WITH_SETJMP
10720# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10721 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10722# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10724# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10725 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10726#else
10727# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10728 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10729# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10730 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10731# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10732 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10733#endif
10734
10735#ifndef IEM_WITH_SETJMP
10736# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10738# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10740#else
10741# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10742 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10743# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10744 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10745#endif
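
/*
 * Illustrative usage sketch (an assumed decoder fragment, not a specific
 * instruction from this file): a 16-bit load through the current effective
 * segment into a general register, using the fetch macros above.  bRm is
 * assumed to have been fetched by the caller.
 *
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */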
10746
10747
10748
10749#ifndef IEM_WITH_SETJMP
10750# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10751 do { \
10752 uint8_t u8Tmp; \
10753 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10754 (a_u16Dst) = u8Tmp; \
10755 } while (0)
10756# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10757 do { \
10758 uint8_t u8Tmp; \
10759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10760 (a_u32Dst) = u8Tmp; \
10761 } while (0)
10762# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10763 do { \
10764 uint8_t u8Tmp; \
10765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10766 (a_u64Dst) = u8Tmp; \
10767 } while (0)
10768# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10769 do { \
10770 uint16_t u16Tmp; \
10771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10772 (a_u32Dst) = u16Tmp; \
10773 } while (0)
10774# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10775 do { \
10776 uint16_t u16Tmp; \
10777 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10778 (a_u64Dst) = u16Tmp; \
10779 } while (0)
10780# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10781 do { \
10782 uint32_t u32Tmp; \
10783 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10784 (a_u64Dst) = u32Tmp; \
10785 } while (0)
10786#else /* IEM_WITH_SETJMP */
10787# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10788 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10789# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10790 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10791# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10792 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10793# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10794 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10795# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10796 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10797# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10798 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10799#endif /* IEM_WITH_SETJMP */
10800
10801#ifndef IEM_WITH_SETJMP
10802# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10803 do { \
10804 uint8_t u8Tmp; \
10805 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10806 (a_u16Dst) = (int8_t)u8Tmp; \
10807 } while (0)
10808# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10809 do { \
10810 uint8_t u8Tmp; \
10811 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10812 (a_u32Dst) = (int8_t)u8Tmp; \
10813 } while (0)
10814# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10815 do { \
10816 uint8_t u8Tmp; \
10817 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10818 (a_u64Dst) = (int8_t)u8Tmp; \
10819 } while (0)
10820# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10821 do { \
10822 uint16_t u16Tmp; \
10823 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10824 (a_u32Dst) = (int16_t)u16Tmp; \
10825 } while (0)
10826# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10827 do { \
10828 uint16_t u16Tmp; \
10829 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10830 (a_u64Dst) = (int16_t)u16Tmp; \
10831 } while (0)
10832# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10833 do { \
10834 uint32_t u32Tmp; \
10835 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10836 (a_u64Dst) = (int32_t)u32Tmp; \
10837 } while (0)
10838#else /* IEM_WITH_SETJMP */
10839# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10840 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10841# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10842 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10843# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10844 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10845# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10846 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10847# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10848 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10849# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10850 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10851#endif /* IEM_WITH_SETJMP */
10852
10853#ifndef IEM_WITH_SETJMP
10854# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10855 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10856# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10857 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10858# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10859 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10860# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10861 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10862#else
10863# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10864 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10865# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10866 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10867# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10868 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10869# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10870 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10871#endif
10872
10873#ifndef IEM_WITH_SETJMP
10874# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10875 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10876# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10877 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10878# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10879 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10880# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10881 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10882#else
10883# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10884 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10885# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10886 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10887# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10888 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10889# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10890 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10891#endif
10892
10893#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10894#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10895#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10896#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10897#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10898#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10899#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10900 do { \
10901 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10902 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10903 } while (0)
10904
10905#ifndef IEM_WITH_SETJMP
10906# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10907 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10908# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10909 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10910#else
10911# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10912 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10913# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10914 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10915#endif
10916
10917
10918#define IEM_MC_PUSH_U16(a_u16Value) \
10919 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10920#define IEM_MC_PUSH_U32(a_u32Value) \
10921 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10922#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10923 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10924#define IEM_MC_PUSH_U64(a_u64Value) \
10925 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10926
10927#define IEM_MC_POP_U16(a_pu16Value) \
10928 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10929#define IEM_MC_POP_U32(a_pu32Value) \
10930 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10931#define IEM_MC_POP_U64(a_pu64Value) \
10932 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
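
/*
 * Illustrative usage sketch (assumed handler fragment): pushing a general
 * register with the stack helpers above.  iReg is assumed to have been
 * decoded by the caller.
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, iReg);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */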
10933
10934/** Maps guest memory for direct or bounce buffered access.
10935 * The purpose is to pass it to an operand implementation, hence the a_iArg argument.
10936 * @remarks May return.
10937 */
10938#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10939 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10940
10941/** Maps guest memory for direct or bounce buffered access.
10942 * The purpose is to pass it to an operand implementation, hence the a_iArg argument.
10943 * @remarks May return.
10944 */
10945#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
10946 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10947
10948/** Commits the memory and unmaps the guest memory.
10949 * @remarks May return.
10950 */
10951#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
10952 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
10953
10954/** Commits the memory and unmaps the guest memory, unless the FPU status word
10955 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
10956 * that would prevent the FPU store instruction from storing anything.
10957 *
10958 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
10959 * store, while \#P will not.
10960 *
10961 * @remarks May in theory return - for now.
10962 */
10963#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
10964 do { \
10965 if ( !(a_u16FSW & X86_FSW_ES) \
10966 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
10967 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
10968 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
10969 } while (0)
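
/*
 * Illustrative read-modify-write sketch using the mapping macros above
 * (assumed decoder fragment; iemAImpl_add_u16 merely stands in for whatever
 * worker the instruction actually uses):
 *
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint16_t *,      pu16Dst,         0);
 *      IEM_MC_ARG(uint16_t,        u16Src,          1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
 *      IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */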
10970
10971/** Calculate efficient address from R/M. */
10972#ifndef IEM_WITH_SETJMP
10973# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10974 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
10975#else
10976# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10977 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
10978#endif
10979
10980#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
10981#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
10982#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
10983#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
10984#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
10985#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
10986#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
10987
10988/**
10989 * Defers the rest of the instruction emulation to a C implementation routine
10990 * and returns, only taking the standard parameters.
10991 *
10992 * @param a_pfnCImpl The pointer to the C routine.
10993 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
10994 */
10995#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
10996
10997/**
10998 * Defers the rest of the instruction emulation to a C implementation routine and
10999 * returns, taking one argument in addition to the standard ones.
11000 *
11001 * @param a_pfnCImpl The pointer to the C routine.
11002 * @param a0 The argument.
11003 */
11004#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11005
11006/**
11007 * Defers the rest of the instruction emulation to a C implementation routine
11008 * and returns, taking two arguments in addition to the standard ones.
11009 *
11010 * @param a_pfnCImpl The pointer to the C routine.
11011 * @param a0 The first extra argument.
11012 * @param a1 The second extra argument.
11013 */
11014#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11015
11016/**
11017 * Defers the rest of the instruction emulation to a C implementation routine
11018 * and returns, taking three arguments in addition to the standard ones.
11019 *
11020 * @param a_pfnCImpl The pointer to the C routine.
11021 * @param a0 The first extra argument.
11022 * @param a1 The second extra argument.
11023 * @param a2 The third extra argument.
11024 */
11025#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11026
11027/**
11028 * Defers the rest of the instruction emulation to a C implementation routine
11029 * and returns, taking four arguments in addition to the standard ones.
11030 *
11031 * @param a_pfnCImpl The pointer to the C routine.
11032 * @param a0 The first extra argument.
11033 * @param a1 The second extra argument.
11034 * @param a2 The third extra argument.
11035 * @param a3 The fourth extra argument.
11036 */
11037#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11038
11039/**
11040 * Defers the rest of the instruction emulation to a C implementation routine
11041 * and returns, taking five arguments in addition to the standard ones.
11042 *
11043 * @param a_pfnCImpl The pointer to the C routine.
11044 * @param a0 The first extra argument.
11045 * @param a1 The second extra argument.
11046 * @param a2 The third extra argument.
11047 * @param a3 The fourth extra argument.
11048 * @param a4 The fifth extra argument.
11049 */
11050#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11051
11052/**
11053 * Defers the entire instruction emulation to a C implementation routine and
11054 * returns, only taking the standard parameters.
11055 *
11056 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
11057 *
11058 * @param a_pfnCImpl The pointer to the C routine.
11059 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11060 */
11061#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11062
11063/**
11064 * Defers the entire instruction emulation to a C implementation routine and
11065 * returns, taking one argument in addition to the standard ones.
11066 *
11067 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
11068 *
11069 * @param a_pfnCImpl The pointer to the C routine.
11070 * @param a0 The argument.
11071 */
11072#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11073
11074/**
11075 * Defers the entire instruction emulation to a C implementation routine and
11076 * returns, taking two arguments in addition to the standard ones.
11077 *
11078 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
11079 *
11080 * @param a_pfnCImpl The pointer to the C routine.
11081 * @param a0 The first extra argument.
11082 * @param a1 The second extra argument.
11083 */
11084#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11085
11086/**
11087 * Defers the entire instruction emulation to a C implementation routine and
11088 * returns, taking three arguments in addition to the standard ones.
11089 *
11090 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
11091 *
11092 * @param a_pfnCImpl The pointer to the C routine.
11093 * @param a0 The first extra argument.
11094 * @param a1 The second extra argument.
11095 * @param a2 The third extra argument.
11096 */
11097#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
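
/*
 * Illustrative sketch of handing the remainder of an instruction to a C
 * implementation routine from inside an IEM_MC block (assumed fragment;
 * iemCImpl_SomeWorker is a hypothetical worker name):
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_ARG(uint8_t,       iEffSeg,                                    0);
 *      IEM_MC_ARG(RTGCPTR,       GCPtrEffSrc,                                1);
 *      IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg, pVCpu->iem.s.enmEffOpSize, 2);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
 *      IEM_MC_CALL_CIMPL_3(iemCImpl_SomeWorker, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
 *      IEM_MC_END();
 *
 * IEM_MC_CALL_CIMPL_3 returns from the decoder, so nothing after IEM_MC_END()
 * in such a function is reached; the IEM_MC_DEFER_TO_CIMPL variants above are
 * for decoders that need no IEM_MC block at all.
 */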
11098
11099/**
11100 * Calls a FPU assembly implementation taking one visible argument.
11101 *
11102 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11103 * @param a0 The first extra argument.
11104 */
11105#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11106 do { \
11107 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11108 } while (0)
11109
11110/**
11111 * Calls a FPU assembly implementation taking two visible arguments.
11112 *
11113 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11114 * @param a0 The first extra argument.
11115 * @param a1 The second extra argument.
11116 */
11117#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11118 do { \
11119 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11120 } while (0)
11121
11122/**
11123 * Calls a FPU assembly implementation taking three visible arguments.
11124 *
11125 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11126 * @param a0 The first extra argument.
11127 * @param a1 The second extra argument.
11128 * @param a2 The third extra argument.
11129 */
11130#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11131 do { \
11132 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11133 } while (0)
11134
11135#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11136 do { \
11137 (a_FpuData).FSW = (a_FSW); \
11138 (a_FpuData).r80Result = *(a_pr80Value); \
11139 } while (0)
11140
11141/** Pushes FPU result onto the stack. */
11142#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11143 iemFpuPushResult(pVCpu, &a_FpuData)
11144/** Pushes FPU result onto the stack and sets the FPUDP. */
11145#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11146 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11147
11148/** Replaces ST0 with result value one and pushes result value two onto the FPU stack. */
11149#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11150 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11151
11152/** Stores FPU result in a stack register. */
11153#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11154 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11155/** Stores FPU result in a stack register and pops the stack. */
11156#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11157 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11158/** Stores FPU result in a stack register and sets the FPUDP. */
11159#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11160 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11161/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11162 * stack. */
11163#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11164 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11165
11166/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11167#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11168 iemFpuUpdateOpcodeAndIp(pVCpu)
11169/** Free a stack register (for FFREE and FFREEP). */
11170#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11171 iemFpuStackFree(pVCpu, a_iStReg)
11172/** Increment the FPU stack pointer. */
11173#define IEM_MC_FPU_STACK_INC_TOP() \
11174 iemFpuStackIncTop(pVCpu)
11175/** Decrement the FPU stack pointer. */
11176#define IEM_MC_FPU_STACK_DEC_TOP() \
11177 iemFpuStackDecTop(pVCpu)
11178
11179/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11180#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11181 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11182/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11183#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11184 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11185/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11186#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11187 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11188/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11189#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11190 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11191/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11192 * stack. */
11193#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11194 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11195/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11196#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11197    iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11198
11199/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11200#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11201 iemFpuStackUnderflow(pVCpu, a_iStDst)
11202/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11203 * stack. */
11204#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11205 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11206/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11207 * FPUDS. */
11208#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11209 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11210/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11211 * FPUDS. Pops stack. */
11212#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11213 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11214/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11215 * stack twice. */
11216#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11217 iemFpuStackUnderflowThenPopPop(pVCpu)
11218/** Raises a FPU stack underflow exception for an instruction pushing a result
11219 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11220#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11221 iemFpuStackPushUnderflow(pVCpu)
11222/** Raises a FPU stack underflow exception for an instruction pushing a result
11223 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11224#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11225 iemFpuStackPushUnderflowTwo(pVCpu)
11226
11227/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11228 * FPUIP, FPUCS and FOP. */
11229#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11230 iemFpuStackPushOverflow(pVCpu)
11231/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11232 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11233#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11234 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11235/** Prepares for using the FPU state.
11236 * Ensures that we can use the host FPU in the current context (RC+R0).
11237 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11238#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11239/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
11240#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11241/** Actualizes the guest FPU state so it can be accessed and modified. */
11242#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11243
11244/** Prepares for using the SSE state.
11245 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11246 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11247#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11248/** Actualizes the guest XMM0..15 register state for read-only access. */
11249#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11250/** Actualizes the guest XMM0..15 register state for read-write access. */
11251#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11252
11253/**
11254 * Calls a MMX assembly implementation taking two visible arguments.
11255 *
11256 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11257 * @param a0 The first extra argument.
11258 * @param a1 The second extra argument.
11259 */
11260#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11261 do { \
11262 IEM_MC_PREPARE_FPU_USAGE(); \
11263 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11264 } while (0)
11265
11266/**
11267 * Calls a MMX assembly implementation taking three visible arguments.
11268 *
11269 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11270 * @param a0 The first extra argument.
11271 * @param a1 The second extra argument.
11272 * @param a2 The third extra argument.
11273 */
11274#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11275 do { \
11276 IEM_MC_PREPARE_FPU_USAGE(); \
11277 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11278 } while (0)
11279
11280
11281/**
11282 * Calls a SSE assembly implementation taking two visible arguments.
11283 *
11284 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11285 * @param a0 The first extra argument.
11286 * @param a1 The second extra argument.
11287 */
11288#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11289 do { \
11290 IEM_MC_PREPARE_SSE_USAGE(); \
11291 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11292 } while (0)
11293
11294/**
11295 * Calls a SSE assembly implementation taking three visible arguments.
11296 *
11297 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11298 * @param a0 The first extra argument.
11299 * @param a1 The second extra argument.
11300 * @param a2 The third extra argument.
11301 */
11302#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11303 do { \
11304 IEM_MC_PREPARE_SSE_USAGE(); \
11305 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11306 } while (0)
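
/*
 * Illustrative register-to-register SSE sketch using the helpers above
 * (assumed fragment; iemAImpl_pxor_u128 and the exact argument types are
 * assumptions here):
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint128_t *,       pDst, 0);
 *      IEM_MC_ARG(uint128_t const *, pSrc, 1);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */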
11307
11308/** @note Not for IOPL or IF testing. */
11309#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11310/** @note Not for IOPL or IF testing. */
11311#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11312/** @note Not for IOPL or IF testing. */
11313#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11314/** @note Not for IOPL or IF testing. */
11315#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11316/** @note Not for IOPL or IF testing. */
11317#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11318 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11319 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11320/** @note Not for IOPL or IF testing. */
11321#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11322 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11323 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11324/** @note Not for IOPL or IF testing. */
11325#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11326 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11327 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11328 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11329/** @note Not for IOPL or IF testing. */
11330#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11331 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11332 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11333 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11334#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11335#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11336#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11337/** @note Not for IOPL or IF testing. */
11338#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11339 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11340 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11341/** @note Not for IOPL or IF testing. */
11342#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11343 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11344 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11345/** @note Not for IOPL or IF testing. */
11346#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11347 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11348 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11349/** @note Not for IOPL or IF testing. */
11350#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11351 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11352 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11353/** @note Not for IOPL or IF testing. */
11354#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11355 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11356 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11357/** @note Not for IOPL or IF testing. */
11358#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11359 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11360 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11361#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11362#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11363
11364#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11365 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11366#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11367 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11368#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11369 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11370#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11371 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11372#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11373 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11374#define IEM_MC_IF_FCW_IM() \
11375 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11376
11377#define IEM_MC_ELSE() } else {
11378#define IEM_MC_ENDIF() } do {} while (0)
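
/*
 * Illustrative sketch tying the FPU helpers together (assumed fragment in the
 * style of an fadd st0,stN decoder; iemAImpl_fadd_r80_by_r80 is the assumed
 * worker name):
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */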
11379
11380/** @} */
11381
11382
11383/** @name Opcode Debug Helpers.
11384 * @{
11385 */
11386#ifdef DEBUG
11387# define IEMOP_MNEMONIC(a_szMnemonic) \
11388 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11389 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions))
11390# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
11391 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11392 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pVCpu->iem.s.cInstructions))
11393#else
11394# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
11395# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
11396#endif
11397
11398/** @} */
11399
11400
11401/** @name Opcode Helpers.
11402 * @{
11403 */
11404
11405#ifdef IN_RING3
11406# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11407 do { \
11408 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11409 else \
11410 { \
11411 DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11412 return IEMOP_RAISE_INVALID_OPCODE(); \
11413 } \
11414 } while (0)
11415#else
11416# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11417 do { \
11418 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11419 else return IEMOP_RAISE_INVALID_OPCODE(); \
11420 } while (0)
11421#endif
11422
11423/** The instruction requires a 186 or later. */
11424#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11425# define IEMOP_HLP_MIN_186() do { } while (0)
11426#else
11427# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11428#endif
11429
11430/** The instruction requires a 286 or later. */
11431#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11432# define IEMOP_HLP_MIN_286() do { } while (0)
11433#else
11434# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11435#endif
11436
11437/** The instruction requires a 386 or later. */
11438#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11439# define IEMOP_HLP_MIN_386() do { } while (0)
11440#else
11441# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11442#endif
11443
11444/** The instruction requires a 386 or later if the given expression is true. */
11445#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11446# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11447#else
11448# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11449#endif
11450
11451/** The instruction requires a 486 or later. */
11452#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11453# define IEMOP_HLP_MIN_486() do { } while (0)
11454#else
11455# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11456#endif
11457
11458/** The instruction requires a Pentium (586) or later. */
11459#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
11460# define IEMOP_HLP_MIN_586() do { } while (0)
11461#else
11462# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
11463#endif
11464
11465/** The instruction requires a PentiumPro (686) or later. */
11466#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
11467# define IEMOP_HLP_MIN_686() do { } while (0)
11468#else
11469# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
11470#endif
11471
11472
11473/** The instruction raises an \#UD in real and V8086 mode. */
11474#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11475 do \
11476 { \
11477 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11478 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11479 } while (0)
11480
11481/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11482 * 64-bit mode. */
11483#define IEMOP_HLP_NO_64BIT() \
11484 do \
11485 { \
11486 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11487 return IEMOP_RAISE_INVALID_OPCODE(); \
11488 } while (0)
11489
11490/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11491 * 64-bit mode. */
11492#define IEMOP_HLP_ONLY_64BIT() \
11493 do \
11494 { \
11495 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11496 return IEMOP_RAISE_INVALID_OPCODE(); \
11497 } while (0)
11498
11499/** The instruction defaults to 64-bit operand size if 64-bit mode. */
11500#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11501 do \
11502 { \
11503 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11504 iemRecalEffOpSize64Default(pVCpu); \
11505 } while (0)
11506
11507/** The instruction has 64-bit operand size if 64-bit mode. */
11508#define IEMOP_HLP_64BIT_OP_SIZE() \
11509 do \
11510 { \
11511 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11512 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11513 } while (0)
11514
11515/** Only a REX prefix immediately preceding the first opcode byte takes
11516 * effect. This macro helps ensure this as well as log bad guest code. */
11517#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11518 do \
11519 { \
11520 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11521 { \
11522 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11523 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11524 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11525 pVCpu->iem.s.uRexB = 0; \
11526 pVCpu->iem.s.uRexIndex = 0; \
11527 pVCpu->iem.s.uRexReg = 0; \
11528 iemRecalEffOpSize(pVCpu); \
11529 } \
11530 } while (0)
11531
11532/**
11533 * Done decoding.
11534 */
11535#define IEMOP_HLP_DONE_DECODING() \
11536 do \
11537 { \
11538 /*nothing for now, maybe later... */ \
11539 } while (0)
11540
11541/**
11542 * Done decoding, raise \#UD exception if lock prefix present.
11543 */
11544#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11545 do \
11546 { \
11547 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11548 { /* likely */ } \
11549 else \
11550 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11551 } while (0)
11552#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11553 do \
11554 { \
11555 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11556 { /* likely */ } \
11557 else \
11558 { \
11559 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11560 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11561 } \
11562 } while (0)
11563#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11564 do \
11565 { \
11566 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11567 { /* likely */ } \
11568 else \
11569 { \
11570 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11571 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11572 } \
11573 } while (0)
11574
11575/**
11576 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11577 * are present.
11578 */
11579#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11580 do \
11581 { \
11582 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11583 { /* likely */ } \
11584 else \
11585 return IEMOP_RAISE_INVALID_OPCODE(); \
11586 } while (0)
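
/*
 * Illustrative sketch of how the opcode helpers above combine in a decoder
 * function (iemOp_Example and iemCImpl_Example are hypothetical names):
 *
 *      FNIEMOP_DEF(iemOp_Example)
 *      {
 *          IEMOP_MNEMONIC("example");
 *          IEMOP_HLP_MIN_486();                        // raise #UD on pre-486 targets
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();   // raise #UD on a LOCK prefix
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_Example);
 *      }
 */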
11587
11588
11589/**
11590 * Calculates the effective address of a ModR/M memory operand.
11591 *
11592 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11593 *
11594 * @return Strict VBox status code.
11595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11596 * @param bRm The ModRM byte.
11597 * @param cbImm The size of any immediate following the
11598 * effective address opcode bytes. Important for
11599 * RIP relative addressing.
11600 * @param pGCPtrEff Where to return the effective address.
11601 */
11602IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11603{
11604 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11605 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11606# define SET_SS_DEF() \
11607 do \
11608 { \
11609 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11610 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11611 } while (0)
11612
11613 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11614 {
11615/** @todo Check the effective address size crap! */
11616 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11617 {
11618 uint16_t u16EffAddr;
11619
11620 /* Handle the disp16 form with no registers first. */
11621 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11622 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11623 else
11624 {
11625                /* Get the displacement. */
11626 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11627 {
11628 case 0: u16EffAddr = 0; break;
11629 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11630 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11631 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11632 }
11633
11634 /* Add the base and index registers to the disp. */
11635 switch (bRm & X86_MODRM_RM_MASK)
11636 {
11637 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11638 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11639 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11640 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11641 case 4: u16EffAddr += pCtx->si; break;
11642 case 5: u16EffAddr += pCtx->di; break;
11643 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11644 case 7: u16EffAddr += pCtx->bx; break;
11645 }
11646 }
11647
11648 *pGCPtrEff = u16EffAddr;
11649 }
11650 else
11651 {
11652 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11653 uint32_t u32EffAddr;
11654
11655 /* Handle the disp32 form with no registers first. */
11656 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11657 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11658 else
11659 {
11660 /* Get the register (or SIB) value. */
11661 switch ((bRm & X86_MODRM_RM_MASK))
11662 {
11663 case 0: u32EffAddr = pCtx->eax; break;
11664 case 1: u32EffAddr = pCtx->ecx; break;
11665 case 2: u32EffAddr = pCtx->edx; break;
11666 case 3: u32EffAddr = pCtx->ebx; break;
11667 case 4: /* SIB */
11668 {
11669 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11670
11671 /* Get the index and scale it. */
11672 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11673 {
11674 case 0: u32EffAddr = pCtx->eax; break;
11675 case 1: u32EffAddr = pCtx->ecx; break;
11676 case 2: u32EffAddr = pCtx->edx; break;
11677 case 3: u32EffAddr = pCtx->ebx; break;
11678 case 4: u32EffAddr = 0; /*none */ break;
11679 case 5: u32EffAddr = pCtx->ebp; break;
11680 case 6: u32EffAddr = pCtx->esi; break;
11681 case 7: u32EffAddr = pCtx->edi; break;
11682 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11683 }
11684 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11685
11686 /* add base */
11687 switch (bSib & X86_SIB_BASE_MASK)
11688 {
11689 case 0: u32EffAddr += pCtx->eax; break;
11690 case 1: u32EffAddr += pCtx->ecx; break;
11691 case 2: u32EffAddr += pCtx->edx; break;
11692 case 3: u32EffAddr += pCtx->ebx; break;
11693 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11694 case 5:
11695 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11696 {
11697 u32EffAddr += pCtx->ebp;
11698 SET_SS_DEF();
11699 }
11700 else
11701 {
11702 uint32_t u32Disp;
11703 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11704 u32EffAddr += u32Disp;
11705 }
11706 break;
11707 case 6: u32EffAddr += pCtx->esi; break;
11708 case 7: u32EffAddr += pCtx->edi; break;
11709 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11710 }
11711 break;
11712 }
11713 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11714 case 6: u32EffAddr = pCtx->esi; break;
11715 case 7: u32EffAddr = pCtx->edi; break;
11716 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11717 }
11718
11719 /* Get and add the displacement. */
11720 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11721 {
11722 case 0:
11723 break;
11724 case 1:
11725 {
11726 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11727 u32EffAddr += i8Disp;
11728 break;
11729 }
11730 case 2:
11731 {
11732 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11733 u32EffAddr += u32Disp;
11734 break;
11735 }
11736 default:
11737 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11738 }
11739
11740 }
11741 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11742 *pGCPtrEff = u32EffAddr;
11743 else
11744 {
11745 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11746 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11747 }
11748 }
11749 }
11750 else
11751 {
11752 uint64_t u64EffAddr;
11753
11754 /* Handle the rip+disp32 form with no registers first. */
11755 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11756 {
11757 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11758 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11759 }
11760 else
11761 {
11762 /* Get the register (or SIB) value. */
11763 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11764 {
11765 case 0: u64EffAddr = pCtx->rax; break;
11766 case 1: u64EffAddr = pCtx->rcx; break;
11767 case 2: u64EffAddr = pCtx->rdx; break;
11768 case 3: u64EffAddr = pCtx->rbx; break;
11769 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11770 case 6: u64EffAddr = pCtx->rsi; break;
11771 case 7: u64EffAddr = pCtx->rdi; break;
11772 case 8: u64EffAddr = pCtx->r8; break;
11773 case 9: u64EffAddr = pCtx->r9; break;
11774 case 10: u64EffAddr = pCtx->r10; break;
11775 case 11: u64EffAddr = pCtx->r11; break;
11776 case 13: u64EffAddr = pCtx->r13; break;
11777 case 14: u64EffAddr = pCtx->r14; break;
11778 case 15: u64EffAddr = pCtx->r15; break;
11779 /* SIB */
11780 case 4:
11781 case 12:
11782 {
11783 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11784
11785 /* Get the index and scale it. */
11786 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11787 {
11788 case 0: u64EffAddr = pCtx->rax; break;
11789 case 1: u64EffAddr = pCtx->rcx; break;
11790 case 2: u64EffAddr = pCtx->rdx; break;
11791 case 3: u64EffAddr = pCtx->rbx; break;
11792 case 4: u64EffAddr = 0; /*none */ break;
11793 case 5: u64EffAddr = pCtx->rbp; break;
11794 case 6: u64EffAddr = pCtx->rsi; break;
11795 case 7: u64EffAddr = pCtx->rdi; break;
11796 case 8: u64EffAddr = pCtx->r8; break;
11797 case 9: u64EffAddr = pCtx->r9; break;
11798 case 10: u64EffAddr = pCtx->r10; break;
11799 case 11: u64EffAddr = pCtx->r11; break;
11800 case 12: u64EffAddr = pCtx->r12; break;
11801 case 13: u64EffAddr = pCtx->r13; break;
11802 case 14: u64EffAddr = pCtx->r14; break;
11803 case 15: u64EffAddr = pCtx->r15; break;
11804 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11805 }
11806 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11807
11808 /* add base */
11809 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11810 {
11811 case 0: u64EffAddr += pCtx->rax; break;
11812 case 1: u64EffAddr += pCtx->rcx; break;
11813 case 2: u64EffAddr += pCtx->rdx; break;
11814 case 3: u64EffAddr += pCtx->rbx; break;
11815 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11816 case 6: u64EffAddr += pCtx->rsi; break;
11817 case 7: u64EffAddr += pCtx->rdi; break;
11818 case 8: u64EffAddr += pCtx->r8; break;
11819 case 9: u64EffAddr += pCtx->r9; break;
11820 case 10: u64EffAddr += pCtx->r10; break;
11821 case 11: u64EffAddr += pCtx->r11; break;
11822 case 12: u64EffAddr += pCtx->r12; break;
11823 case 14: u64EffAddr += pCtx->r14; break;
11824 case 15: u64EffAddr += pCtx->r15; break;
11825 /* complicated encodings */
11826 case 5:
11827 case 13:
11828 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11829 {
11830 if (!pVCpu->iem.s.uRexB)
11831 {
11832 u64EffAddr += pCtx->rbp;
11833 SET_SS_DEF();
11834 }
11835 else
11836 u64EffAddr += pCtx->r13;
11837 }
11838 else
11839 {
11840 uint32_t u32Disp;
11841 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11842 u64EffAddr += (int32_t)u32Disp;
11843 }
11844 break;
11845 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11846 }
11847 break;
11848 }
11849 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11850 }
11851
11852 /* Get and add the displacement. */
11853 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11854 {
11855 case 0:
11856 break;
11857 case 1:
11858 {
11859 int8_t i8Disp;
11860 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11861 u64EffAddr += i8Disp;
11862 break;
11863 }
11864 case 2:
11865 {
11866 uint32_t u32Disp;
11867 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11868 u64EffAddr += (int32_t)u32Disp;
11869 break;
11870 }
11871 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11872 }
11873
11874 }
11875
11876 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11877 *pGCPtrEff = u64EffAddr;
11878 else
11879 {
11880 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11881 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11882 }
11883 }
11884
11885 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11886 return VINF_SUCCESS;
11887}
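
/*
 * Worked example for the routine above (assumed input, 32-bit addressing):
 * bRm=0x44 gives mod=1 and rm=4, so a SIB byte follows; SIB=0x88 gives
 * scale=2 (index*4), index=1 (ECX) and base=0 (EAX); mod=1 then adds an
 * 8-bit displacement.  The effective address is therefore EAX + ECX*4 + disp8,
 * with DS remaining the default segment since neither EBP nor ESP is used as
 * the base.
 */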
11888
11889
11890/**
11891 * Calculates the effective address of a ModR/M memory operand.
11892 *
11893 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11894 *
11895 * @return Strict VBox status code.
11896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11897 * @param bRm The ModRM byte.
11898 * @param cbImm The size of any immediate following the
11899 * effective address opcode bytes. Important for
11900 * RIP relative addressing.
11901 * @param pGCPtrEff Where to return the effective address.
11902 * @param offRsp RSP displacement.
11903 */
11904IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11905{
11906    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
11907 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11908# define SET_SS_DEF() \
11909 do \
11910 { \
11911 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11912 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11913 } while (0)
11914
11915 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11916 {
11917/** @todo Check the effective address size crap! */
11918 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11919 {
11920 uint16_t u16EffAddr;
11921
11922 /* Handle the disp16 form with no registers first. */
11923 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11924 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11925 else
11926 {
11927                /* Get the displacement. */
11928 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11929 {
11930 case 0: u16EffAddr = 0; break;
11931 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11932 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11933 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11934 }
11935
11936 /* Add the base and index registers to the disp. */
11937 switch (bRm & X86_MODRM_RM_MASK)
11938 {
11939 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11940 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11941 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11942 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11943 case 4: u16EffAddr += pCtx->si; break;
11944 case 5: u16EffAddr += pCtx->di; break;
11945 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11946 case 7: u16EffAddr += pCtx->bx; break;
11947 }
11948 }
11949
11950 *pGCPtrEff = u16EffAddr;
11951 }
11952 else
11953 {
11954 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11955 uint32_t u32EffAddr;
11956
11957 /* Handle the disp32 form with no registers first. */
11958 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11959 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11960 else
11961 {
11962 /* Get the register (or SIB) value. */
11963 switch ((bRm & X86_MODRM_RM_MASK))
11964 {
11965 case 0: u32EffAddr = pCtx->eax; break;
11966 case 1: u32EffAddr = pCtx->ecx; break;
11967 case 2: u32EffAddr = pCtx->edx; break;
11968 case 3: u32EffAddr = pCtx->ebx; break;
11969 case 4: /* SIB */
11970 {
11971 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11972
11973 /* Get the index and scale it. */
11974 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11975 {
11976 case 0: u32EffAddr = pCtx->eax; break;
11977 case 1: u32EffAddr = pCtx->ecx; break;
11978 case 2: u32EffAddr = pCtx->edx; break;
11979 case 3: u32EffAddr = pCtx->ebx; break;
11980 case 4: u32EffAddr = 0; /*none */ break;
11981 case 5: u32EffAddr = pCtx->ebp; break;
11982 case 6: u32EffAddr = pCtx->esi; break;
11983 case 7: u32EffAddr = pCtx->edi; break;
11984 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11985 }
11986 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11987
11988 /* add base */
11989 switch (bSib & X86_SIB_BASE_MASK)
11990 {
11991 case 0: u32EffAddr += pCtx->eax; break;
11992 case 1: u32EffAddr += pCtx->ecx; break;
11993 case 2: u32EffAddr += pCtx->edx; break;
11994 case 3: u32EffAddr += pCtx->ebx; break;
11995 case 4:
11996 u32EffAddr += pCtx->esp + offRsp;
11997 SET_SS_DEF();
11998 break;
11999 case 5:
12000 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12001 {
12002 u32EffAddr += pCtx->ebp;
12003 SET_SS_DEF();
12004 }
12005 else
12006 {
12007 uint32_t u32Disp;
12008 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12009 u32EffAddr += u32Disp;
12010 }
12011 break;
12012 case 6: u32EffAddr += pCtx->esi; break;
12013 case 7: u32EffAddr += pCtx->edi; break;
12014 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12015 }
12016 break;
12017 }
12018 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12019 case 6: u32EffAddr = pCtx->esi; break;
12020 case 7: u32EffAddr = pCtx->edi; break;
12021 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12022 }
12023
12024 /* Get and add the displacement. */
12025 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12026 {
12027 case 0:
12028 break;
12029 case 1:
12030 {
12031 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12032 u32EffAddr += i8Disp;
12033 break;
12034 }
12035 case 2:
12036 {
12037 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12038 u32EffAddr += u32Disp;
12039 break;
12040 }
12041 default:
12042 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12043 }
12044
12045 }
12046 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12047 *pGCPtrEff = u32EffAddr;
12048 else
12049 {
12050 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12051 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12052 }
12053 }
12054 }
12055 else
12056 {
12057 uint64_t u64EffAddr;
12058
12059 /* Handle the rip+disp32 form with no registers first. */
12060 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12061 {
12062 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12063 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12064 }
12065 else
12066 {
12067 /* Get the register (or SIB) value. */
12068 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12069 {
12070 case 0: u64EffAddr = pCtx->rax; break;
12071 case 1: u64EffAddr = pCtx->rcx; break;
12072 case 2: u64EffAddr = pCtx->rdx; break;
12073 case 3: u64EffAddr = pCtx->rbx; break;
12074 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12075 case 6: u64EffAddr = pCtx->rsi; break;
12076 case 7: u64EffAddr = pCtx->rdi; break;
12077 case 8: u64EffAddr = pCtx->r8; break;
12078 case 9: u64EffAddr = pCtx->r9; break;
12079 case 10: u64EffAddr = pCtx->r10; break;
12080 case 11: u64EffAddr = pCtx->r11; break;
12081 case 13: u64EffAddr = pCtx->r13; break;
12082 case 14: u64EffAddr = pCtx->r14; break;
12083 case 15: u64EffAddr = pCtx->r15; break;
12084 /* SIB */
12085 case 4:
12086 case 12:
12087 {
12088 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12089
12090 /* Get the index and scale it. */
12091 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12092 {
12093 case 0: u64EffAddr = pCtx->rax; break;
12094 case 1: u64EffAddr = pCtx->rcx; break;
12095 case 2: u64EffAddr = pCtx->rdx; break;
12096 case 3: u64EffAddr = pCtx->rbx; break;
12097 case 4: u64EffAddr = 0; /*none */ break;
12098 case 5: u64EffAddr = pCtx->rbp; break;
12099 case 6: u64EffAddr = pCtx->rsi; break;
12100 case 7: u64EffAddr = pCtx->rdi; break;
12101 case 8: u64EffAddr = pCtx->r8; break;
12102 case 9: u64EffAddr = pCtx->r9; break;
12103 case 10: u64EffAddr = pCtx->r10; break;
12104 case 11: u64EffAddr = pCtx->r11; break;
12105 case 12: u64EffAddr = pCtx->r12; break;
12106 case 13: u64EffAddr = pCtx->r13; break;
12107 case 14: u64EffAddr = pCtx->r14; break;
12108 case 15: u64EffAddr = pCtx->r15; break;
12109 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12110 }
12111 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12112
12113 /* add base */
12114 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12115 {
12116 case 0: u64EffAddr += pCtx->rax; break;
12117 case 1: u64EffAddr += pCtx->rcx; break;
12118 case 2: u64EffAddr += pCtx->rdx; break;
12119 case 3: u64EffAddr += pCtx->rbx; break;
12120 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12121 case 6: u64EffAddr += pCtx->rsi; break;
12122 case 7: u64EffAddr += pCtx->rdi; break;
12123 case 8: u64EffAddr += pCtx->r8; break;
12124 case 9: u64EffAddr += pCtx->r9; break;
12125 case 10: u64EffAddr += pCtx->r10; break;
12126 case 11: u64EffAddr += pCtx->r11; break;
12127 case 12: u64EffAddr += pCtx->r12; break;
12128 case 14: u64EffAddr += pCtx->r14; break;
12129 case 15: u64EffAddr += pCtx->r15; break;
12130 /* complicated encodings */
12131 case 5:
12132 case 13:
12133 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12134 {
12135 if (!pVCpu->iem.s.uRexB)
12136 {
12137 u64EffAddr += pCtx->rbp;
12138 SET_SS_DEF();
12139 }
12140 else
12141 u64EffAddr += pCtx->r13;
12142 }
12143 else
12144 {
12145 uint32_t u32Disp;
12146 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12147 u64EffAddr += (int32_t)u32Disp;
12148 }
12149 break;
12150 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12151 }
12152 break;
12153 }
12154 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12155 }
12156
12157 /* Get and add the displacement. */
12158 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12159 {
12160 case 0:
12161 break;
12162 case 1:
12163 {
12164 int8_t i8Disp;
12165 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12166 u64EffAddr += i8Disp;
12167 break;
12168 }
12169 case 2:
12170 {
12171 uint32_t u32Disp;
12172 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12173 u64EffAddr += (int32_t)u32Disp;
12174 break;
12175 }
12176 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12177 }
12178
12179 }
12180
12181 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12182 *pGCPtrEff = u64EffAddr;
12183 else
12184 {
12185 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12186 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12187 }
12188 }
12189
12190 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12191 return VINF_SUCCESS;
12192}
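/*
 * Worked example for the rip+disp32 form handled above (hypothetical encoding
 * and addresses, for illustration only): the 10 byte instruction
 * 81 3d <disp32> <imm32> (cmp dword [rip+disp32], imm32) at RIP 0x1000 must
 * address relative to the *next* instruction, i.e. 0x100a + disp32.  At the
 * point where the disp32 has just been fetched only 6 opcode bytes have been
 * consumed, so the code adds
 *     rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm = 0x1000 + 6 + 4 = 0x100a
 * which is why callers must pass the size of any trailing immediate in cbImm.
 */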
12193
12194
12195#ifdef IEM_WITH_SETJMP
12196/**
12197 * Calculates the effective address of a ModR/M memory operand.
12198 *
12199 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12200 *
12201 * May longjmp on internal error.
12202 *
12203 * @return The effective address.
12204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12205 * @param bRm The ModRM byte.
12206 * @param cbImm The size of any immediate following the
12207 * effective address opcode bytes. Important for
12208 * RIP relative addressing.
12209 */
12210IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12211{
12212 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12213 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12214# define SET_SS_DEF() \
12215 do \
12216 { \
12217 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12218 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12219 } while (0)
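/*
 * SET_SS_DEF() is invoked below for the BP/EBP/RBP and ESP/RSP based forms,
 * matching the rule that stack-register based addressing defaults to SS
 * rather than DS.  Equivalent standalone check for the 16-bit forms (a
 * sketch with a hypothetical helper name, not used by the code below):
 *
 *     static bool iemExampleIsSsDefault16(uint8_t bRm)
 *     {
 *         uint8_t const iRm = bRm & X86_MODRM_RM_MASK;
 *         return iRm == 2 || iRm == 3                           // [bp+si], [bp+di]
 *             || (iRm == 6 && (bRm & X86_MODRM_MOD_MASK) != 0); // [bp+disp]
 *     }
 *
 * The mod=0, rm=6 case is the plain disp16 form and keeps the DS default.
 */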
12220
12221 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12222 {
12223/** @todo Check the effective address size crap! */
12224 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12225 {
12226 uint16_t u16EffAddr;
12227
12228 /* Handle the disp16 form with no registers first. */
12229 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12230 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12231 else
12232 {
12233 /* Get the displacement. */
12234 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12235 {
12236 case 0: u16EffAddr = 0; break;
12237 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12238 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12239 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12240 }
12241
12242 /* Add the base and index registers to the disp. */
12243 switch (bRm & X86_MODRM_RM_MASK)
12244 {
12245 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12246 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12247 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12248 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12249 case 4: u16EffAddr += pCtx->si; break;
12250 case 5: u16EffAddr += pCtx->di; break;
12251 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12252 case 7: u16EffAddr += pCtx->bx; break;
12253 }
12254 }
12255
12256 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12257 return u16EffAddr;
12258 }
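/*
 * Worked 16-bit example (hypothetical register values): bRm=0x5e decodes as
 * mod=01, rm=110, i.e. [bp+disp8].  With bp=0x8000, disp8=0x10 and no segment
 * prefix the code above computes
 *     u16EffAddr = 0x0010 + 0x8000 = 0x8010
 * and SET_SS_DEF() switches the effective segment to SS.
 */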
12259
12260 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12261 uint32_t u32EffAddr;
12262
12263 /* Handle the disp32 form with no registers first. */
12264 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12265 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12266 else
12267 {
12268 /* Get the register (or SIB) value. */
12269 switch ((bRm & X86_MODRM_RM_MASK))
12270 {
12271 case 0: u32EffAddr = pCtx->eax; break;
12272 case 1: u32EffAddr = pCtx->ecx; break;
12273 case 2: u32EffAddr = pCtx->edx; break;
12274 case 3: u32EffAddr = pCtx->ebx; break;
12275 case 4: /* SIB */
12276 {
12277 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12278
12279 /* Get the index and scale it. */
12280 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12281 {
12282 case 0: u32EffAddr = pCtx->eax; break;
12283 case 1: u32EffAddr = pCtx->ecx; break;
12284 case 2: u32EffAddr = pCtx->edx; break;
12285 case 3: u32EffAddr = pCtx->ebx; break;
12286 case 4: u32EffAddr = 0; /*none */ break;
12287 case 5: u32EffAddr = pCtx->ebp; break;
12288 case 6: u32EffAddr = pCtx->esi; break;
12289 case 7: u32EffAddr = pCtx->edi; break;
12290 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12291 }
12292 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12293
12294 /* add base */
12295 switch (bSib & X86_SIB_BASE_MASK)
12296 {
12297 case 0: u32EffAddr += pCtx->eax; break;
12298 case 1: u32EffAddr += pCtx->ecx; break;
12299 case 2: u32EffAddr += pCtx->edx; break;
12300 case 3: u32EffAddr += pCtx->ebx; break;
12301 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12302 case 5:
12303 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12304 {
12305 u32EffAddr += pCtx->ebp;
12306 SET_SS_DEF();
12307 }
12308 else
12309 {
12310 uint32_t u32Disp;
12311 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12312 u32EffAddr += u32Disp;
12313 }
12314 break;
12315 case 6: u32EffAddr += pCtx->esi; break;
12316 case 7: u32EffAddr += pCtx->edi; break;
12317 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12318 }
12319 break;
12320 }
12321 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12322 case 6: u32EffAddr = pCtx->esi; break;
12323 case 7: u32EffAddr = pCtx->edi; break;
12324 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12325 }
12326
12327 /* Get and add the displacement. */
12328 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12329 {
12330 case 0:
12331 break;
12332 case 1:
12333 {
12334 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12335 u32EffAddr += i8Disp;
12336 break;
12337 }
12338 case 2:
12339 {
12340 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12341 u32EffAddr += u32Disp;
12342 break;
12343 }
12344 default:
12345 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12346 }
12347 }
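/*
 * Worked 32-bit SIB example (hypothetical values): bRm=0x44, bSib=0x88 and
 * disp8=0x10 decode as mod=01, rm=100 (SIB follows), scale=2, index=001 (ecx),
 * base=000 (eax).  With eax=0x1000 and ecx=0x20 the code above computes
 *     u32EffAddr = (0x20 << 2) + 0x1000 + 0x10 = 0x1090.
 */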
12348
12349 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12350 {
12351 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12352 return u32EffAddr;
12353 }
12354 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12355 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12356 return u32EffAddr & UINT16_MAX;
12357 }
12358
12359 uint64_t u64EffAddr;
12360
12361 /* Handle the rip+disp32 form with no registers first. */
12362 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12363 {
12364 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12365 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12366 }
12367 else
12368 {
12369 /* Get the register (or SIB) value. */
12370 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12371 {
12372 case 0: u64EffAddr = pCtx->rax; break;
12373 case 1: u64EffAddr = pCtx->rcx; break;
12374 case 2: u64EffAddr = pCtx->rdx; break;
12375 case 3: u64EffAddr = pCtx->rbx; break;
12376 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12377 case 6: u64EffAddr = pCtx->rsi; break;
12378 case 7: u64EffAddr = pCtx->rdi; break;
12379 case 8: u64EffAddr = pCtx->r8; break;
12380 case 9: u64EffAddr = pCtx->r9; break;
12381 case 10: u64EffAddr = pCtx->r10; break;
12382 case 11: u64EffAddr = pCtx->r11; break;
12383 case 13: u64EffAddr = pCtx->r13; break;
12384 case 14: u64EffAddr = pCtx->r14; break;
12385 case 15: u64EffAddr = pCtx->r15; break;
12386 /* SIB */
12387 case 4:
12388 case 12:
12389 {
12390 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12391
12392 /* Get the index and scale it. */
12393 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12394 {
12395 case 0: u64EffAddr = pCtx->rax; break;
12396 case 1: u64EffAddr = pCtx->rcx; break;
12397 case 2: u64EffAddr = pCtx->rdx; break;
12398 case 3: u64EffAddr = pCtx->rbx; break;
12399 case 4: u64EffAddr = 0; /*none */ break;
12400 case 5: u64EffAddr = pCtx->rbp; break;
12401 case 6: u64EffAddr = pCtx->rsi; break;
12402 case 7: u64EffAddr = pCtx->rdi; break;
12403 case 8: u64EffAddr = pCtx->r8; break;
12404 case 9: u64EffAddr = pCtx->r9; break;
12405 case 10: u64EffAddr = pCtx->r10; break;
12406 case 11: u64EffAddr = pCtx->r11; break;
12407 case 12: u64EffAddr = pCtx->r12; break;
12408 case 13: u64EffAddr = pCtx->r13; break;
12409 case 14: u64EffAddr = pCtx->r14; break;
12410 case 15: u64EffAddr = pCtx->r15; break;
12411 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12412 }
12413 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12414
12415 /* add base */
12416 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12417 {
12418 case 0: u64EffAddr += pCtx->rax; break;
12419 case 1: u64EffAddr += pCtx->rcx; break;
12420 case 2: u64EffAddr += pCtx->rdx; break;
12421 case 3: u64EffAddr += pCtx->rbx; break;
12422 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12423 case 6: u64EffAddr += pCtx->rsi; break;
12424 case 7: u64EffAddr += pCtx->rdi; break;
12425 case 8: u64EffAddr += pCtx->r8; break;
12426 case 9: u64EffAddr += pCtx->r9; break;
12427 case 10: u64EffAddr += pCtx->r10; break;
12428 case 11: u64EffAddr += pCtx->r11; break;
12429 case 12: u64EffAddr += pCtx->r12; break;
12430 case 14: u64EffAddr += pCtx->r14; break;
12431 case 15: u64EffAddr += pCtx->r15; break;
12432 /* complicated encodings */
12433 case 5:
12434 case 13:
12435 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12436 {
12437 if (!pVCpu->iem.s.uRexB)
12438 {
12439 u64EffAddr += pCtx->rbp;
12440 SET_SS_DEF();
12441 }
12442 else
12443 u64EffAddr += pCtx->r13;
12444 }
12445 else
12446 {
12447 uint32_t u32Disp;
12448 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12449 u64EffAddr += (int32_t)u32Disp;
12450 }
12451 break;
12452 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12453 }
12454 break;
12455 }
12456 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12457 }
12458
12459 /* Get and add the displacement. */
12460 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12461 {
12462 case 0:
12463 break;
12464 case 1:
12465 {
12466 int8_t i8Disp;
12467 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12468 u64EffAddr += i8Disp;
12469 break;
12470 }
12471 case 2:
12472 {
12473 uint32_t u32Disp;
12474 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12475 u64EffAddr += (int32_t)u32Disp;
12476 break;
12477 }
12478 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12479 }
12480
12481 }
12482
12483 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12484 {
12485 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12486 return u64EffAddr;
12487 }
12488 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12489 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12490 return u64EffAddr & UINT32_MAX;
12491}
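/*
 * Minimal usage sketch for the setjmp variant above, assuming the caller is
 * already running under the IEM setjmp frame so decode failures can longjmp:
 *
 *     uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *     RTGCPTR GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0);  // no trailing immediate
 *
 * Decoder code normally reaches it via IEM_MC_CALC_RM_EFF_ADDR (see the doc
 * comment above) rather than calling it directly.
 */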
12492#endif /* IEM_WITH_SETJMP */
12493
12494
12495/** @} */
12496
12497
12498
12499/*
12500 * Include the instructions
12501 */
12502#include "IEMAllInstructions.cpp.h"
12503
12504
12505
12506
12507#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12508
12509/**
12510 * Sets up execution verification mode.
12511 */
12512IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12513{
12514
12515 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12516
12517 /*
12518 * Always note down the address of the current instruction.
12519 */
12520 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12521 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12522
12523 /*
12524 * Enable verification and/or logging.
12525 */
12526 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12527 if ( fNewNoRem
12528 && ( 0
12529#if 0 /* auto enable on first paged protected mode interrupt */
12530 || ( pOrgCtx->eflags.Bits.u1IF
12531 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12532 && TRPMHasTrap(pVCpu)
12533 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12534#endif
12535#if 0
12536 || ( pOrgCtx->cs.Sel == 0x10
12537 && ( pOrgCtx->rip == 0x90119e3e
12538 || pOrgCtx->rip == 0x901d9810))
12539#endif
12540#if 0 /* Auto enable DSL - FPU stuff. */
12541 || ( pOrgCtx->cs.Sel == 0x10
12542 && (// pOrgCtx->rip == 0xc02ec07f
12543 //|| pOrgCtx->rip == 0xc02ec082
12544 //|| pOrgCtx->rip == 0xc02ec0c9
12545 0
12546 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12547#endif
12548#if 0 /* Auto enable DSL - fstp st0 stuff. */
12549 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12550#endif
12551#if 0
12552 || pOrgCtx->rip == 0x9022bb3a
12553#endif
12554#if 0
12555 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12556#endif
12557#if 0
12558 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12559 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12560#endif
12561#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12562 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12563 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12564 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12565#endif
12566#if 0 /* NT4SP1 - xadd early boot. */
12567 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12568#endif
12569#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12570 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12571#endif
12572#if 0 /* NT4SP1 - cmpxchg (AMD). */
12573 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12574#endif
12575#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12576 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12577#endif
12578#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12579 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12580
12581#endif
12582#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12583 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12584
12585#endif
12586#if 0 /* NT4SP1 - frstor [ecx] */
12587 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12588#endif
12589#if 0 /* xxxxxx - All long mode code. */
12590 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12591#endif
12592#if 0 /* rep movsq linux 3.7 64-bit boot. */
12593 || (pOrgCtx->rip == 0x0000000000100241)
12594#endif
12595#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12596 || (pOrgCtx->rip == 0x000000000215e240)
12597#endif
12598#if 0 /* DOS's size-overridden iret to v8086. */
12599 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12600#endif
12601 )
12602 )
12603 {
12604 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12605 RTLogFlags(NULL, "enabled");
12606 fNewNoRem = false;
12607 }
12608 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12609 {
12610 pVCpu->iem.s.fNoRem = fNewNoRem;
12611 if (!fNewNoRem)
12612 {
12613 LogAlways(("Enabling verification mode!\n"));
12614 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12615 }
12616 else
12617 LogAlways(("Disabling verification mode!\n"));
12618 }
12619
12620 /*
12621 * Switch state.
12622 */
12623 if (IEM_VERIFICATION_ENABLED(pVCpu))
12624 {
12625 static CPUMCTX s_DebugCtx; /* Ugly! */
12626
12627 s_DebugCtx = *pOrgCtx;
12628 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12629 }
12630
12631 /*
12632 * See if there is an interrupt pending in TRPM and inject it if we can.
12633 */
12634 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12635 if ( pOrgCtx->eflags.Bits.u1IF
12636 && TRPMHasTrap(pVCpu)
12637 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12638 {
12639 uint8_t u8TrapNo;
12640 TRPMEVENT enmType;
12641 RTGCUINT uErrCode;
12642 RTGCPTR uCr2;
12643 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12644 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12645 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12646 TRPMResetTrap(pVCpu);
12647 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12648 }
12649
12650 /*
12651 * Reset the counters.
12652 */
12653 pVCpu->iem.s.cIOReads = 0;
12654 pVCpu->iem.s.cIOWrites = 0;
12655 pVCpu->iem.s.fIgnoreRaxRdx = false;
12656 pVCpu->iem.s.fOverlappingMovs = false;
12657 pVCpu->iem.s.fProblematicMemory = false;
12658 pVCpu->iem.s.fUndefinedEFlags = 0;
12659
12660 if (IEM_VERIFICATION_ENABLED(pVCpu))
12661 {
12662 /*
12663 * Free all verification records.
12664 */
12665 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12666 pVCpu->iem.s.pIemEvtRecHead = NULL;
12667 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12668 do
12669 {
12670 while (pEvtRec)
12671 {
12672 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12673 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12674 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12675 pEvtRec = pNext;
12676 }
12677 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12678 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12679 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12680 } while (pEvtRec);
12681 }
12682}
12683
12684
12685/**
12686 * Allocates a verification event record.
12687 * @returns Pointer to a record, or NULL if verification is disabled or no record can be allocated.
12688 */
12689IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12690{
12691 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12692 return NULL;
12693
12694 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12695 if (pEvtRec)
12696 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12697 else
12698 {
12699 if (!pVCpu->iem.s.ppIemEvtRecNext)
12700 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12701
12702 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12703 if (!pEvtRec)
12704 return NULL;
12705 }
12706 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12707 pEvtRec->pNext = NULL;
12708 return pEvtRec;
12709}
12710
12711
12712/**
12713 * IOMMMIORead notification.
12714 */
12715VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12716{
12717 PVMCPU pVCpu = VMMGetCpu(pVM);
12718 if (!pVCpu)
12719 return;
12720 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12721 if (!pEvtRec)
12722 return;
12723 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12724 pEvtRec->u.RamRead.GCPhys = GCPhys;
12725 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12726 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12727 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12728}
12729
12730
12731/**
12732 * IOMMMIOWrite notification.
12733 */
12734VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12735{
12736 PVMCPU pVCpu = VMMGetCpu(pVM);
12737 if (!pVCpu)
12738 return;
12739 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12740 if (!pEvtRec)
12741 return;
12742 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12743 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12744 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12745 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12746 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12747 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12748 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12749 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12750 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12751}
12752
12753
12754/**
12755 * IOMIOPortRead notification.
12756 */
12757VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12758{
12759 PVMCPU pVCpu = VMMGetCpu(pVM);
12760 if (!pVCpu)
12761 return;
12762 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12763 if (!pEvtRec)
12764 return;
12765 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12766 pEvtRec->u.IOPortRead.Port = Port;
12767 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12768 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12769 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12770}
12771
12772/**
12773 * IOMIOPortWrite notification.
12774 */
12775VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12776{
12777 PVMCPU pVCpu = VMMGetCpu(pVM);
12778 if (!pVCpu)
12779 return;
12780 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12781 if (!pEvtRec)
12782 return;
12783 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12784 pEvtRec->u.IOPortWrite.Port = Port;
12785 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12786 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12787 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12788 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12789}
12790
12791
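/**
 * String I/O port read notification (the string counterpart of the
 * IOMIOPortRead notification above).
 */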
12792VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12793{
12794 PVMCPU pVCpu = VMMGetCpu(pVM);
12795 if (!pVCpu)
12796 return;
12797 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12798 if (!pEvtRec)
12799 return;
12800 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12801 pEvtRec->u.IOPortStrRead.Port = Port;
12802 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12803 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12804 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12805 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12806}
12807
12808
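/**
 * String I/O port write notification (the string counterpart of the
 * IOMIOPortWrite notification above).
 */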
12809VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12810{
12811 PVMCPU pVCpu = VMMGetCpu(pVM);
12812 if (!pVCpu)
12813 return;
12814 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12815 if (!pEvtRec)
12816 return;
12817 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12818 pEvtRec->u.IOPortStrWrite.Port = Port;
12819 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12820 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12821 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12822 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12823}
12824
12825
12826/**
12827 * Fakes and records an I/O port read.
12828 *
12829 * @returns VINF_SUCCESS.
12830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12831 * @param Port The I/O port.
12832 * @param pu32Value Where to store the fake value.
12833 * @param cbValue The size of the access.
12834 */
12835IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12836{
12837 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12838 if (pEvtRec)
12839 {
12840 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12841 pEvtRec->u.IOPortRead.Port = Port;
12842 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12843 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12844 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12845 }
12846 pVCpu->iem.s.cIOReads++;
12847 *pu32Value = 0xcccccccc;
12848 return VINF_SUCCESS;
12849}
12850
12851
12852/**
12853 * Fakes and records an I/O port write.
12854 *
12855 * @returns VINF_SUCCESS.
12856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12857 * @param Port The I/O port.
12858 * @param u32Value The value being written.
12859 * @param cbValue The size of the access.
12860 */
12861IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12862{
12863 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12864 if (pEvtRec)
12865 {
12866 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12867 pEvtRec->u.IOPortWrite.Port = Port;
12868 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12869 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12870 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12871 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12872 }
12873 pVCpu->iem.s.cIOWrites++;
12874 return VINF_SUCCESS;
12875}
12876
12877
12878/**
12879 * Used to add extra register state and disassembly details to an assertion message.
12880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12881 */
12882IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12883{
12884 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12885 PVM pVM = pVCpu->CTX_SUFF(pVM);
12886
12887 char szRegs[4096];
12888 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12889 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12890 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12891 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12892 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12893 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12894 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12895 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12896 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12897 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12898 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12899 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12900 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12901 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12902 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12903 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12904 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12905 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12906 " efer=%016VR{efer}\n"
12907 " pat=%016VR{pat}\n"
12908 " sf_mask=%016VR{sf_mask}\n"
12909 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12910 " lstar=%016VR{lstar}\n"
12911 " star=%016VR{star} cstar=%016VR{cstar}\n"
12912 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12913 );
12914
12915 char szInstr1[256];
12916 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12917 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12918 szInstr1, sizeof(szInstr1), NULL);
12919 char szInstr2[256];
12920 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12921 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12922 szInstr2, sizeof(szInstr2), NULL);
12923
12924 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12925}
12926
12927
12928/**
12929 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12930 * dump to the assertion info.
12931 *
12932 * @param pEvtRec The record to dump.
12933 */
12934IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12935{
12936 switch (pEvtRec->enmEvent)
12937 {
12938 case IEMVERIFYEVENT_IOPORT_READ:
12939 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
12940 pEvtRec->u.IOPortRead.Port,
12941 pEvtRec->u.IOPortRead.cbValue);
12942 break;
12943 case IEMVERIFYEVENT_IOPORT_WRITE:
12944 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
12945 pEvtRec->u.IOPortWrite.Port,
12946 pEvtRec->u.IOPortWrite.cbValue,
12947 pEvtRec->u.IOPortWrite.u32Value);
12948 break;
12949 case IEMVERIFYEVENT_IOPORT_STR_READ:
12950 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
12951 pEvtRec->u.IOPortStrRead.Port,
12952 pEvtRec->u.IOPortStrRead.cbValue,
12953 pEvtRec->u.IOPortStrRead.cTransfers);
12954 break;
12955 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
12956 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
12957 pEvtRec->u.IOPortStrWrite.Port,
12958 pEvtRec->u.IOPortStrWrite.cbValue,
12959 pEvtRec->u.IOPortStrWrite.cTransfers);
12960 break;
12961 case IEMVERIFYEVENT_RAM_READ:
12962 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
12963 pEvtRec->u.RamRead.GCPhys,
12964 pEvtRec->u.RamRead.cb);
12965 break;
12966 case IEMVERIFYEVENT_RAM_WRITE:
12967 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
12968 pEvtRec->u.RamWrite.GCPhys,
12969 pEvtRec->u.RamWrite.cb,
12970 (int)pEvtRec->u.RamWrite.cb,
12971 pEvtRec->u.RamWrite.ab);
12972 break;
12973 default:
12974 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
12975 break;
12976 }
12977}
12978
12979
12980/**
12981 * Raises an assertion on the specified records, showing the given message
12982 * with dumps of both records attached.
12983 *
12984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12985 * @param pEvtRec1 The first record.
12986 * @param pEvtRec2 The second record.
12987 * @param pszMsg The message explaining why we're asserting.
12988 */
12989IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
12990{
12991 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12992 iemVerifyAssertAddRecordDump(pEvtRec1);
12993 iemVerifyAssertAddRecordDump(pEvtRec2);
12994 iemVerifyAssertMsg2(pVCpu);
12995 RTAssertPanic();
12996}
12997
12998
12999/**
13000 * Raises an assertion on the specified record, showing the given message with
13001 * a record dump attached.
13002 *
13003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13004 * @param pEvtRec The record to dump.
13005 * @param pszMsg The message explaining why we're asserting.
13006 */
13007IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13008{
13009 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13010 iemVerifyAssertAddRecordDump(pEvtRec);
13011 iemVerifyAssertMsg2(pVCpu);
13012 RTAssertPanic();
13013}
13014
13015
13016/**
13017 * Verifies a write record.
13018 *
13019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13020 * @param pEvtRec The write record.
13021 * @param fRem Set if REM was doing the other execution. If clear
13022 * it was HM.
13023 */
13024IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13025{
13026 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13027 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13028 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13029 if ( RT_FAILURE(rc)
13030 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13031 {
13032 /* fend off ins */
13033 if ( !pVCpu->iem.s.cIOReads
13034 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13035 || ( pEvtRec->u.RamWrite.cb != 1
13036 && pEvtRec->u.RamWrite.cb != 2
13037 && pEvtRec->u.RamWrite.cb != 4) )
13038 {
13039 /* fend off ROMs and MMIO */
13040 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13041 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13042 {
13043 /* fend off fxsave */
13044 if (pEvtRec->u.RamWrite.cb != 512)
13045 {
13046 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13047 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13048 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13049 RTAssertMsg2Add("%s: %.*Rhxs\n"
13050 "iem: %.*Rhxs\n",
13051 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13052 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13053 iemVerifyAssertAddRecordDump(pEvtRec);
13054 iemVerifyAssertMsg2(pVCpu);
13055 RTAssertPanic();
13056 }
13057 }
13058 }
13059 }
13060
13061}
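/*
 * Note on the ROM/MMIO filter above: "GCPhys - lo > cb" is the usual unsigned
 * range-check idiom; thanks to wrap-around it is true exactly when GCPhys lies
 * outside [lo, lo+cb].  Worked example (hypothetical address): GCPhys=0xc8000
 * gives 0xc8000 - 0xa0000 = 0x28000 <= 0x60000, so a write into the legacy
 * VGA/BIOS window starting at 0xa0000 is not flagged; the second check
 * likewise skips the high BIOS flash range starting at 0xfffc0000.
 */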
13062
13063/**
13064 * Performs the post-execution verification checks.
13065 */
13066IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13067{
13068 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13069 return rcStrictIem;
13070
13071 /*
13072 * Switch back the state.
13073 */
13074 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13075 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13076 Assert(pOrgCtx != pDebugCtx);
13077 IEM_GET_CTX(pVCpu) = pOrgCtx;
13078
13079 /*
13080 * Execute the instruction in REM.
13081 */
13082 bool fRem = false;
13083 PVM pVM = pVCpu->CTX_SUFF(pVM);
13084
13085 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13086#ifdef IEM_VERIFICATION_MODE_FULL_HM
13087 if ( HMIsEnabled(pVM)
13088 && pVCpu->iem.s.cIOReads == 0
13089 && pVCpu->iem.s.cIOWrites == 0
13090 && !pVCpu->iem.s.fProblematicMemory)
13091 {
13092 uint64_t uStartRip = pOrgCtx->rip;
13093 unsigned iLoops = 0;
13094 do
13095 {
13096 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13097 iLoops++;
13098 } while ( rc == VINF_SUCCESS
13099 || ( rc == VINF_EM_DBG_STEPPED
13100 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13101 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13102 || ( pOrgCtx->rip != pDebugCtx->rip
13103 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13104 && iLoops < 8) );
13105 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13106 rc = VINF_SUCCESS;
13107 }
13108#endif
13109 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13110 || rc == VINF_IOM_R3_IOPORT_READ
13111 || rc == VINF_IOM_R3_IOPORT_WRITE
13112 || rc == VINF_IOM_R3_MMIO_READ
13113 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13114 || rc == VINF_IOM_R3_MMIO_WRITE
13115 || rc == VINF_CPUM_R3_MSR_READ
13116 || rc == VINF_CPUM_R3_MSR_WRITE
13117 || rc == VINF_EM_RESCHEDULE
13118 )
13119 {
13120 EMRemLock(pVM);
13121 rc = REMR3EmulateInstruction(pVM, pVCpu);
13122 AssertRC(rc);
13123 EMRemUnlock(pVM);
13124 fRem = true;
13125 }
13126
13127# if 1 /* Skip unimplemented instructions for now. */
13128 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13129 {
13130 IEM_GET_CTX(pVCpu) = pOrgCtx;
13131 if (rc == VINF_EM_DBG_STEPPED)
13132 return VINF_SUCCESS;
13133 return rc;
13134 }
13135# endif
13136
13137 /*
13138 * Compare the register states.
13139 */
13140 unsigned cDiffs = 0;
13141 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13142 {
13143 //Log(("REM and IEM ends up with different registers!\n"));
13144 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13145
13146# define CHECK_FIELD(a_Field) \
13147 do \
13148 { \
13149 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13150 { \
13151 switch (sizeof(pOrgCtx->a_Field)) \
13152 { \
13153 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13154 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13155 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13156 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13157 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13158 } \
13159 cDiffs++; \
13160 } \
13161 } while (0)
13162# define CHECK_XSTATE_FIELD(a_Field) \
13163 do \
13164 { \
13165 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13166 { \
13167 switch (sizeof(pOrgXState->a_Field)) \
13168 { \
13169 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13170 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13171 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13172 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13173 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13174 } \
13175 cDiffs++; \
13176 } \
13177 } while (0)
13178
13179# define CHECK_BIT_FIELD(a_Field) \
13180 do \
13181 { \
13182 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13183 { \
13184 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13185 cDiffs++; \
13186 } \
13187 } while (0)
13188
13189# define CHECK_SEL(a_Sel) \
13190 do \
13191 { \
13192 CHECK_FIELD(a_Sel.Sel); \
13193 CHECK_FIELD(a_Sel.Attr.u); \
13194 CHECK_FIELD(a_Sel.u64Base); \
13195 CHECK_FIELD(a_Sel.u32Limit); \
13196 CHECK_FIELD(a_Sel.fFlags); \
13197 } while (0)
13198
13199 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13200 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13201
13202#if 1 /* The recompiler doesn't update these the intel way. */
13203 if (fRem)
13204 {
13205 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13206 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13207 pOrgXState->x87.CS = pDebugXState->x87.CS;
13208 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13209 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13210 pOrgXState->x87.DS = pDebugXState->x87.DS;
13211 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13212 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13213 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13214 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13215 }
13216#endif
13217 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13218 {
13219 RTAssertMsg2Weak(" the FPU state differs\n");
13220 cDiffs++;
13221 CHECK_XSTATE_FIELD(x87.FCW);
13222 CHECK_XSTATE_FIELD(x87.FSW);
13223 CHECK_XSTATE_FIELD(x87.FTW);
13224 CHECK_XSTATE_FIELD(x87.FOP);
13225 CHECK_XSTATE_FIELD(x87.FPUIP);
13226 CHECK_XSTATE_FIELD(x87.CS);
13227 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13228 CHECK_XSTATE_FIELD(x87.FPUDP);
13229 CHECK_XSTATE_FIELD(x87.DS);
13230 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13231 CHECK_XSTATE_FIELD(x87.MXCSR);
13232 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13233 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13234 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13235 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13236 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13237 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13238 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13239 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13240 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13241 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13242 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13243 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13244 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13245 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13246 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13247 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13248 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13249 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13250 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13251 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13252 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13253 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13254 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13255 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13256 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13257 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13258 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13259 }
13260 CHECK_FIELD(rip);
13261 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13262 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13263 {
13264 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13265 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13266 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13267 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13268 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13269 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13270 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13271 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13272 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13273 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13274 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13275 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13276 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13277 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13278 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13279 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13280 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13281 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13282 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13283 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13284 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13285 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13286 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13287 }
13288
13289 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13290 CHECK_FIELD(rax);
13291 CHECK_FIELD(rcx);
13292 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13293 CHECK_FIELD(rdx);
13294 CHECK_FIELD(rbx);
13295 CHECK_FIELD(rsp);
13296 CHECK_FIELD(rbp);
13297 CHECK_FIELD(rsi);
13298 CHECK_FIELD(rdi);
13299 CHECK_FIELD(r8);
13300 CHECK_FIELD(r9);
13301 CHECK_FIELD(r10);
13302 CHECK_FIELD(r11);
13303 CHECK_FIELD(r12);
13304 CHECK_FIELD(r13);
13305 CHECK_SEL(cs);
13306 CHECK_SEL(ss);
13307 CHECK_SEL(ds);
13308 CHECK_SEL(es);
13309 CHECK_SEL(fs);
13310 CHECK_SEL(gs);
13311 CHECK_FIELD(cr0);
13312
13313 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13314 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13315 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
13316 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13317 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13318 {
13319 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13320 { /* ignore */ }
13321 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13322 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13323 && fRem)
13324 { /* ignore */ }
13325 else
13326 CHECK_FIELD(cr2);
13327 }
13328 CHECK_FIELD(cr3);
13329 CHECK_FIELD(cr4);
13330 CHECK_FIELD(dr[0]);
13331 CHECK_FIELD(dr[1]);
13332 CHECK_FIELD(dr[2]);
13333 CHECK_FIELD(dr[3]);
13334 CHECK_FIELD(dr[6]);
13335 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13336 CHECK_FIELD(dr[7]);
13337 CHECK_FIELD(gdtr.cbGdt);
13338 CHECK_FIELD(gdtr.pGdt);
13339 CHECK_FIELD(idtr.cbIdt);
13340 CHECK_FIELD(idtr.pIdt);
13341 CHECK_SEL(ldtr);
13342 CHECK_SEL(tr);
13343 CHECK_FIELD(SysEnter.cs);
13344 CHECK_FIELD(SysEnter.eip);
13345 CHECK_FIELD(SysEnter.esp);
13346 CHECK_FIELD(msrEFER);
13347 CHECK_FIELD(msrSTAR);
13348 CHECK_FIELD(msrPAT);
13349 CHECK_FIELD(msrLSTAR);
13350 CHECK_FIELD(msrCSTAR);
13351 CHECK_FIELD(msrSFMASK);
13352 CHECK_FIELD(msrKERNELGSBASE);
13353
13354 if (cDiffs != 0)
13355 {
13356 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13357 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13358 RTAssertPanic();
13359 static bool volatile s_fEnterDebugger = true;
13360 if (s_fEnterDebugger)
13361 DBGFSTOP(pVM);
13362
13363# if 1 /* Ignore unimplemented instructions for now. */
13364 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13365 rcStrictIem = VINF_SUCCESS;
13366# endif
13367 }
13368# undef CHECK_FIELD
13369# undef CHECK_BIT_FIELD
13370 }
13371
13372 /*
13373 * If the register state compared fine, check the verification event
13374 * records.
13375 */
13376 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13377 {
13378 /*
13379 * Compare verification event records.
13380 * - I/O port accesses should be a 1:1 match.
13381 */
13382 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13383 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13384 while (pIemRec && pOtherRec)
13385 {
13386 /* Since we might miss RAM writes and reads, ignore reads and verify
13387 that the extra IEM write records match what is actually in memory. */
13388 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13389 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13390 && pIemRec->pNext)
13391 {
13392 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13393 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13394 pIemRec = pIemRec->pNext;
13395 }
13396
13397 /* Do the compare. */
13398 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13399 {
13400 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13401 break;
13402 }
13403 bool fEquals;
13404 switch (pIemRec->enmEvent)
13405 {
13406 case IEMVERIFYEVENT_IOPORT_READ:
13407 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13408 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13409 break;
13410 case IEMVERIFYEVENT_IOPORT_WRITE:
13411 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13412 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13413 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13414 break;
13415 case IEMVERIFYEVENT_IOPORT_STR_READ:
13416 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13417 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13418 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13419 break;
13420 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13421 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13422 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13423 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13424 break;
13425 case IEMVERIFYEVENT_RAM_READ:
13426 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13427 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13428 break;
13429 case IEMVERIFYEVENT_RAM_WRITE:
13430 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13431 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13432 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13433 break;
13434 default:
13435 fEquals = false;
13436 break;
13437 }
13438 if (!fEquals)
13439 {
13440 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13441 break;
13442 }
13443
13444 /* advance */
13445 pIemRec = pIemRec->pNext;
13446 pOtherRec = pOtherRec->pNext;
13447 }
13448
13449 /* Ignore extra writes and reads. */
13450 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13451 {
13452 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13453 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13454 pIemRec = pIemRec->pNext;
13455 }
13456 if (pIemRec != NULL)
13457 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13458 else if (pOtherRec != NULL)
13459 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13460 }
13461 IEM_GET_CTX(pVCpu) = pOrgCtx;
13462
13463 return rcStrictIem;
13464}
13465
13466#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13467
13468/* stubs */
13469IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13470{
13471 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13472 return VERR_INTERNAL_ERROR;
13473}
13474
13475IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13476{
13477 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13478 return VERR_INTERNAL_ERROR;
13479}
13480
13481#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13482
13483
13484#ifdef LOG_ENABLED
13485/**
13486 * Logs the current instruction.
13487 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13488 * @param pCtx The current CPU context.
13489 * @param fSameCtx Set if we have the same context information as the VMM,
13490 * clear if we may have already executed an instruction in
13491 * our debug context. When clear, we assume IEMCPU holds
13492 * valid CPU mode info.
13493 */
13494IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13495{
13496# ifdef IN_RING3
13497 if (LogIs2Enabled())
13498 {
13499 char szInstr[256];
13500 uint32_t cbInstr = 0;
13501 if (fSameCtx)
13502 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13503 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13504 szInstr, sizeof(szInstr), &cbInstr);
13505 else
13506 {
13507 uint32_t fFlags = 0;
13508 switch (pVCpu->iem.s.enmCpuMode)
13509 {
13510 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13511 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13512 case IEMMODE_16BIT:
13513 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13514 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13515 else
13516 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13517 break;
13518 }
13519 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13520 szInstr, sizeof(szInstr), &cbInstr);
13521 }
13522
13523 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13524 Log2(("****\n"
13525 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13526 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13527 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13528 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13529 " %s\n"
13530 ,
13531 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13532 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13533 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13534 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13535 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13536 szInstr));
13537
13538 if (LogIs3Enabled())
13539 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13540 }
13541 else
13542# endif
13543 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13544 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13545}
13546#endif
13547
13548
13549/**
13550 * Makes status code adjustments (pass-up from I/O and access handlers)
13551 * as well as maintaining statistics.
13552 *
13553 * @returns Strict VBox status code to pass up.
13554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13555 * @param rcStrict The status from executing an instruction.
13556 */
13557DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13558{
13559 if (rcStrict != VINF_SUCCESS)
13560 {
13561 if (RT_SUCCESS(rcStrict))
13562 {
13563 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13564 || rcStrict == VINF_IOM_R3_IOPORT_READ
13565 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13566 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13567 || rcStrict == VINF_IOM_R3_MMIO_READ
13568 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13569 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13570 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13571 || rcStrict == VINF_CPUM_R3_MSR_READ
13572 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13573 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13574 || rcStrict == VINF_EM_RAW_TO_R3
13575 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13576 /* raw-mode / virt handlers only: */
13577 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13578 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13579 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13580 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13581 || rcStrict == VINF_SELM_SYNC_GDT
13582 || rcStrict == VINF_CSAM_PENDING_ACTION
13583 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13584 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13585/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13586 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13587 if (rcPassUp == VINF_SUCCESS)
13588 pVCpu->iem.s.cRetInfStatuses++;
13589 else if ( rcPassUp < VINF_EM_FIRST
13590 || rcPassUp > VINF_EM_LAST
13591 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13592 {
13593 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13594 pVCpu->iem.s.cRetPassUpStatus++;
13595 rcStrict = rcPassUp;
13596 }
13597 else
13598 {
13599 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13600 pVCpu->iem.s.cRetInfStatuses++;
13601 }
13602 }
13603 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13604 pVCpu->iem.s.cRetAspectNotImplemented++;
13605 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13606 pVCpu->iem.s.cRetInstrNotImplemented++;
13607#ifdef IEM_VERIFICATION_MODE_FULL
13608 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13609 rcStrict = VINF_SUCCESS;
13610#endif
13611 else
13612 pVCpu->iem.s.cRetErrStatuses++;
13613 }
13614 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13615 {
13616 pVCpu->iem.s.cRetPassUpStatus++;
13617 rcStrict = pVCpu->iem.s.rcPassUp;
13618 }
13619
13620 return rcStrict;
13621}
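/*
 * Example of the pass-up handling above (informational statuses only): if the
 * instruction itself returned, say, VINF_IOM_R3_IOPORT_READ while an access
 * handler earlier stashed VINF_EM_RAW_TO_R3 in rcPassUp, the stashed status is
 * what gets returned: it is either outside the EM range (always preferred) or
 * a lower numbered, and thus higher priority, status than the IOM code.
 */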
13622
13623
13624/**
13625 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13626 * IEMExecOneWithPrefetchedByPC.
13627 *
13628 * Similar code is found in IEMExecLots.
13629 *
13630 * @return Strict VBox status code.
13631 * @param pVCpu The cross context virtual CPU structure of the
13632 * calling EMT.
13633 * @param fExecuteInhibit If set, execute the instruction following CLI,
13634 * POP SS and MOV SS,GR.
13635 */
13636#ifdef __GNUC__
13637DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13638#else
13639DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13640#endif
13641{
13642#ifdef IEM_WITH_SETJMP
13643 VBOXSTRICTRC rcStrict;
13644 jmp_buf JmpBuf;
13645 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13646 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13647 if ((rcStrict = setjmp(JmpBuf)) == 0)
13648 {
13649 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13650 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13651 }
13652 else
13653 pVCpu->iem.s.cLongJumps++;
13654 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13655#else
13656 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13657 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13658#endif
13659 if (rcStrict == VINF_SUCCESS)
13660 pVCpu->iem.s.cInstructions++;
13661 if (pVCpu->iem.s.cActiveMappings > 0)
13662 {
13663 Assert(rcStrict != VINF_SUCCESS);
13664 iemMemRollback(pVCpu);
13665 }
13666//#ifdef DEBUG
13667// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13668//#endif
13669
13670 /* Execute the next instruction as well if a cli, pop ss or
13671 mov ss, Gr has just completed successfully. */
13672 if ( fExecuteInhibit
13673 && rcStrict == VINF_SUCCESS
13674 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13675 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13676 {
13677 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13678 if (rcStrict == VINF_SUCCESS)
13679 {
13680#ifdef LOG_ENABLED
13681 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13682#endif
13683#ifdef IEM_WITH_SETJMP
13684 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13685 if ((rcStrict = setjmp(JmpBuf)) == 0)
13686 {
13687 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13688 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13689 }
13690 else
13691 pVCpu->iem.s.cLongJumps++;
13692 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13693#else
13694 IEM_OPCODE_GET_NEXT_U8(&b);
13695 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13696#endif
13697 if (rcStrict == VINF_SUCCESS)
13698 pVCpu->iem.s.cInstructions++;
13699 if (pVCpu->iem.s.cActiveMappings > 0)
13700 {
13701 Assert(rcStrict != VINF_SUCCESS);
13702 iemMemRollback(pVCpu);
13703 }
13704 }
13705 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13706 }
13707
13708 /*
13709 * Return value fiddling, statistics and sanity assertions.
13710 */
13711 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13712
13713 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13714 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13715#if defined(IEM_VERIFICATION_MODE_FULL)
13716 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13717 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13718 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13719 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13720#endif
13721 return rcStrict;
13722}
13723
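/*
 * Illustration only (not compiled): the fExecuteInhibit path above is for
 * guest sequences such as the classic stack switch below, where the
 * instruction after MOV SS must not be separated from it by an interrupt:
 *
 *      mov  ss, ax        ; raises the MOV SS interrupt shadow
 *      mov  esp, ebp      ; executed in the same iemExecOneInner call
 *
 * The 0x7777555533331111 written via EMSetInhibitInterruptsPC afterwards is a
 * PC no guest RIP should ever match, so the recorded shadow no longer applies
 * at the next VMCPU_FF_INHIBIT_INTERRUPTS check.
 */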
13724
13725#ifdef IN_RC
13726/**
13727 * Re-enters raw-mode or ensures we return to ring-3.
13728 *
13729 * @returns rcStrict, maybe modified.
13730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13731 * @param pCtx The current CPU context.
13732 * @param rcStrict The status code returned by the interpreter.
13733 */
13734DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13735{
13736 if ( !pVCpu->iem.s.fInPatchCode
13737 && ( rcStrict == VINF_SUCCESS
13738 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13739 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13740 {
13741 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13742 CPUMRawEnter(pVCpu);
13743 else
13744 {
13745 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13746 rcStrict = VINF_EM_RESCHEDULE;
13747 }
13748 }
13749 return rcStrict;
13750}
13751#endif
13752
13753
13754/**
13755 * Execute one instruction.
13756 *
13757 * @return Strict VBox status code.
13758 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13759 */
13760VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13761{
13762#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13763 if (++pVCpu->iem.s.cVerifyDepth == 1)
13764 iemExecVerificationModeSetup(pVCpu);
13765#endif
13766#ifdef LOG_ENABLED
13767 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13768 iemLogCurInstr(pVCpu, pCtx, true);
13769#endif
13770
13771 /*
13772 * Do the decoding and emulation.
13773 */
13774 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13775 if (rcStrict == VINF_SUCCESS)
13776 rcStrict = iemExecOneInner(pVCpu, true);
13777
13778#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13779 /*
13780 * Assert some sanity.
13781 */
13782 if (pVCpu->iem.s.cVerifyDepth == 1)
13783 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13784 pVCpu->iem.s.cVerifyDepth--;
13785#endif
13786#ifdef IN_RC
13787 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13788#endif
13789 if (rcStrict != VINF_SUCCESS)
13790 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13791 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13792 return rcStrict;
13793}
13794
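/*
 * Illustration only (not compiled): a minimal, hypothetical ring-3 caller of
 * IEMExecOne.  The handling below is a sketch; real callers (EM) do a lot
 * more with the informational VINF_* statuses.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict == VINF_SUCCESS)
 *          return VINF_SUCCESS;        // instruction fully emulated
 *      if (RT_SUCCESS(rcStrict))
 *          return rcStrict;            // informational status, caller decides what to do
 *      Log(("emulation failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 *      return rcStrict;
 */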
13795
13796VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13797{
13798 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13799 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13800
13801 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13802 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13803 if (rcStrict == VINF_SUCCESS)
13804 {
13805 rcStrict = iemExecOneInner(pVCpu, true);
13806 if (pcbWritten)
13807 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13808 }
13809
13810#ifdef IN_RC
13811 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13812#endif
13813 return rcStrict;
13814}
13815
13816
13817VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13818 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13819{
13820 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13821 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13822
13823 VBOXSTRICTRC rcStrict;
13824 if ( cbOpcodeBytes
13825 && pCtx->rip == OpcodeBytesPC)
13826 {
13827 iemInitDecoder(pVCpu, false);
13828#ifdef IEM_WITH_CODE_TLB
13829 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13830 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13831 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13832 pVCpu->iem.s.offCurInstrStart = 0;
13833 pVCpu->iem.s.offInstrNextByte = 0;
13834#else
13835 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13836 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13837#endif
13838 rcStrict = VINF_SUCCESS;
13839 }
13840 else
13841 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13842 if (rcStrict == VINF_SUCCESS)
13843 {
13844 rcStrict = iemExecOneInner(pVCpu, true);
13845 }
13846
13847#ifdef IN_RC
13848 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13849#endif
13850 return rcStrict;
13851}
13852
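/*
 * Illustration only (not compiled): a hypothetical caller that already has
 * the opcode bytes for the current RIP (abBytes and its contents are made-up
 * names/values, pCtx is assumed to be the caller's CPUMCTX pointer).  If
 * pCtx->rip does not match OpcodeBytesPC, the function simply falls back to
 * the normal prefetch path above.
 *
 *      uint8_t const abBytes[3] = { 0x0f, 0x01, 0xf8 };    // example bytes only
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
 *                                                           abBytes, sizeof(abBytes));
 */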
13853
13854VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13855{
13856 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13857 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13858
13859 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13860 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13861 if (rcStrict == VINF_SUCCESS)
13862 {
13863 rcStrict = iemExecOneInner(pVCpu, false);
13864 if (pcbWritten)
13865 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13866 }
13867
13868#ifdef IN_RC
13869 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13870#endif
13871 return rcStrict;
13872}
13873
13874
13875VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13876 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13877{
13878 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13879 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13880
13881 VBOXSTRICTRC rcStrict;
13882 if ( cbOpcodeBytes
13883 && pCtx->rip == OpcodeBytesPC)
13884 {
13885 iemInitDecoder(pVCpu, true);
13886#ifdef IEM_WITH_CODE_TLB
13887 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13888 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13889 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13890 pVCpu->iem.s.offCurInstrStart = 0;
13891 pVCpu->iem.s.offInstrNextByte = 0;
13892#else
13893 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13894 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13895#endif
13896 rcStrict = VINF_SUCCESS;
13897 }
13898 else
13899 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13900 if (rcStrict == VINF_SUCCESS)
13901 rcStrict = iemExecOneInner(pVCpu, false);
13902
13903#ifdef IN_RC
13904 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13905#endif
13906 return rcStrict;
13907}
13908
13909
13910/**
13911 * For debugging DISGetParamSize; may come in handy.
13912 *
13913 * @returns Strict VBox status code.
13914 * @param pVCpu The cross context virtual CPU structure of the
13915 * calling EMT.
13916 * @param pCtxCore The context core structure.
13917 * @param OpcodeBytesPC The PC of the opcode bytes.
13918 * @param pvOpcodeBytes Prefetched opcode bytes.
13919 * @param cbOpcodeBytes Number of prefetched bytes.
13920 * @param pcbWritten Where to return the number of bytes written.
13921 * Optional.
13922 */
13923VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13924 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13925 uint32_t *pcbWritten)
13926{
13927 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13928 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13929
13930 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13931 VBOXSTRICTRC rcStrict;
13932 if ( cbOpcodeBytes
13933 && pCtx->rip == OpcodeBytesPC)
13934 {
13935 iemInitDecoder(pVCpu, true);
13936#ifdef IEM_WITH_CODE_TLB
13937 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13938 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13939 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13940 pVCpu->iem.s.offCurInstrStart = 0;
13941 pVCpu->iem.s.offInstrNextByte = 0;
13942#else
13943 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13944 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13945#endif
13946 rcStrict = VINF_SUCCESS;
13947 }
13948 else
13949 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13950 if (rcStrict == VINF_SUCCESS)
13951 {
13952 rcStrict = iemExecOneInner(pVCpu, false);
13953 if (pcbWritten)
13954 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13955 }
13956
13957#ifdef IN_RC
13958 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13959#endif
13960 return rcStrict;
13961}
13962
13963
13964VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
13965{
13966 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
13967
13968#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13969 /*
13970 * See if there is an interrupt pending in TRPM, inject it if we can.
13971 */
13972 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13973# ifdef IEM_VERIFICATION_MODE_FULL
13974 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13975# endif
13976 if ( pCtx->eflags.Bits.u1IF
13977 && TRPMHasTrap(pVCpu)
13978 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13979 {
13980 uint8_t u8TrapNo;
13981 TRPMEVENT enmType;
13982 RTGCUINT uErrCode;
13983 RTGCPTR uCr2;
13984 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13985 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13986 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13987 TRPMResetTrap(pVCpu);
13988 }
13989
13990 /*
13991 * Log the state.
13992 */
13993# ifdef LOG_ENABLED
13994 iemLogCurInstr(pVCpu, pCtx, true);
13995# endif
13996
13997 /*
13998 * Do the decoding and emulation.
13999 */
14000 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14001 if (rcStrict == VINF_SUCCESS)
14002 rcStrict = iemExecOneInner(pVCpu, true);
14003
14004 /*
14005 * Assert some sanity.
14006 */
14007 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14008
14009 /*
14010 * Log and return.
14011 */
14012 if (rcStrict != VINF_SUCCESS)
14013 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14014 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14015 if (pcInstructions)
14016 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14017 return rcStrict;
14018
14019#else /* Not verification mode */
14020
14021 /*
14022 * See if there is an interrupt pending in TRPM, inject it if we can.
14023 */
14024 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14025# ifdef IEM_VERIFICATION_MODE_FULL
14026 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14027# endif
14028 if ( pCtx->eflags.Bits.u1IF
14029 && TRPMHasTrap(pVCpu)
14030 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14031 {
14032 uint8_t u8TrapNo;
14033 TRPMEVENT enmType;
14034 RTGCUINT uErrCode;
14035 RTGCPTR uCr2;
14036 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14037 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14038 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14039 TRPMResetTrap(pVCpu);
14040 }
14041
14042 /*
14043 * Initial decoder init w/ prefetch, then setup setjmp.
14044 */
14045 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14046 if (rcStrict == VINF_SUCCESS)
14047 {
14048# ifdef IEM_WITH_SETJMP
14049 jmp_buf JmpBuf;
14050 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14051 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14052 pVCpu->iem.s.cActiveMappings = 0;
14053 if ((rcStrict = setjmp(JmpBuf)) == 0)
14054# endif
14055 {
14056 /*
14057 * The run loop. We limit ourselves to 4096 instructions right now.
14058 */
14059 PVM pVM = pVCpu->CTX_SUFF(pVM);
14060 uint32_t cInstr = 4096;
14061 for (;;)
14062 {
14063 /*
14064 * Log the state.
14065 */
14066# ifdef LOG_ENABLED
14067 iemLogCurInstr(pVCpu, pCtx, true);
14068# endif
14069
14070 /*
14071 * Do the decoding and emulation.
14072 */
14073 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14074 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14075 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14076 {
14077 Assert(pVCpu->iem.s.cActiveMappings == 0);
14078 pVCpu->iem.s.cInstructions++;
14079 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14080 {
14081 uint32_t fCpu = pVCpu->fLocalForcedActions
14082 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14083 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14084 | VMCPU_FF_TLB_FLUSH
14085# ifdef VBOX_WITH_RAW_MODE
14086 | VMCPU_FF_TRPM_SYNC_IDT
14087 | VMCPU_FF_SELM_SYNC_TSS
14088 | VMCPU_FF_SELM_SYNC_GDT
14089 | VMCPU_FF_SELM_SYNC_LDT
14090# endif
14091 | VMCPU_FF_INHIBIT_INTERRUPTS
14092 | VMCPU_FF_BLOCK_NMIS ));
14093
14094 if (RT_LIKELY( ( !fCpu
14095 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14096 && !pCtx->rflags.Bits.u1IF) )
14097 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14098 {
14099 if (cInstr-- > 0)
14100 {
14101 Assert(pVCpu->iem.s.cActiveMappings == 0);
14102 iemReInitDecoder(pVCpu);
14103 continue;
14104 }
14105 }
14106 }
14107 Assert(pVCpu->iem.s.cActiveMappings == 0);
14108 }
14109 else if (pVCpu->iem.s.cActiveMappings > 0)
14110 iemMemRollback(pVCpu);
14111 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14112 break;
14113 }
14114 }
14115# ifdef IEM_WITH_SETJMP
14116 else
14117 {
14118 if (pVCpu->iem.s.cActiveMappings > 0)
14119 iemMemRollback(pVCpu);
14120 pVCpu->iem.s.cLongJumps++;
14121 }
14122 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14123# endif
14124
14125 /*
14126 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14127 */
14128 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14129 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14130# if defined(IEM_VERIFICATION_MODE_FULL)
14131 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14132 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14133 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14134 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14135# endif
14136 }
14137
14138 /*
14139 * Maybe re-enter raw-mode and log.
14140 */
14141# ifdef IN_RC
14142 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14143# endif
14144 if (rcStrict != VINF_SUCCESS)
14145 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14146 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14147 if (pcInstructions)
14148 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14149 return rcStrict;
14150#endif /* Not verification mode */
14151}
14152
14153
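/*
 * Illustration only (not compiled): a hypothetical caller of IEMExecLots.
 * A caller such as EM might invoke it roughly like this when it decides to
 * emulate a burst of guest code; the logging is invented for the sketch.
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict      = IEMExecLots(pVCpu, &cInstructions);
 *      LogFlow(("emulated %u instructions -> %Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;            // scheduling / I/O / error status for the caller to act on
 */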
14154
14155/**
14156 * Injects a trap, fault, abort, software interrupt or external interrupt.
14157 *
14158 * The parameter list matches TRPMQueryTrapAll pretty closely.
14159 *
14160 * @returns Strict VBox status code.
14161 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14162 * @param u8TrapNo The trap number.
14163 * @param enmType What type is it (trap/fault/abort), software
14164 * interrupt or hardware interrupt.
14165 * @param uErrCode The error code if applicable.
14166 * @param uCr2 The CR2 value if applicable.
14167 * @param cbInstr The instruction length (only relevant for
14168 * software interrupts).
14169 */
14170VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14171 uint8_t cbInstr)
14172{
14173 iemInitDecoder(pVCpu, false);
14174#ifdef DBGFTRACE_ENABLED
14175 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14176 u8TrapNo, enmType, uErrCode, uCr2);
14177#endif
14178
14179 uint32_t fFlags;
14180 switch (enmType)
14181 {
14182 case TRPM_HARDWARE_INT:
14183 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14184 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14185 uErrCode = uCr2 = 0;
14186 break;
14187
14188 case TRPM_SOFTWARE_INT:
14189 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14190 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14191 uErrCode = uCr2 = 0;
14192 break;
14193
14194 case TRPM_TRAP:
14195 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14196 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14197 if (u8TrapNo == X86_XCPT_PF)
14198 fFlags |= IEM_XCPT_FLAGS_CR2;
14199 switch (u8TrapNo)
14200 {
14201 case X86_XCPT_DF:
14202 case X86_XCPT_TS:
14203 case X86_XCPT_NP:
14204 case X86_XCPT_SS:
14205 case X86_XCPT_PF:
14206 case X86_XCPT_AC:
14207 fFlags |= IEM_XCPT_FLAGS_ERR;
14208 break;
14209
14210 case X86_XCPT_NMI:
14211 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14212 break;
14213 }
14214 break;
14215
14216 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14217 }
14218
14219 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14220}
14221
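/*
 * Illustration only (not compiled): injecting a guest page fault by hand.
 * The error code and fault address are made-up values; TRPM_TRAP with
 * X86_XCPT_PF takes the IEM_XCPT_FLAGS_ERR + IEM_XCPT_FLAGS_CR2 path above.
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                            X86_TRAP_PF_P | X86_TRAP_PF_RW,  // hypothetical error code
 *                                            UINT64_C(0x00007fff00001000),    // hypothetical CR2
 *                                            0);                              // cbInstr, n/a for faults
 */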
14222
14223/**
14224 * Injects the active TRPM event.
14225 *
14226 * @returns Strict VBox status code.
14227 * @param pVCpu The cross context virtual CPU structure.
14228 */
14229VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14230{
14231#ifndef IEM_IMPLEMENTS_TASKSWITCH
14232 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14233#else
14234 uint8_t u8TrapNo;
14235 TRPMEVENT enmType;
14236 RTGCUINT uErrCode;
14237 RTGCUINTPTR uCr2;
14238 uint8_t cbInstr;
14239 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14240 if (RT_FAILURE(rc))
14241 return rc;
14242
14243 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14244
14245 /** @todo Are there any other codes that imply the event was successfully
14246 * delivered to the guest? See @bugref{6607}. */
14247 if ( rcStrict == VINF_SUCCESS
14248 || rcStrict == VINF_IEM_RAISED_XCPT)
14249 {
14250 TRPMResetTrap(pVCpu);
14251 }
14252 return rcStrict;
14253#endif
14254}
14255
14256
14257VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14258{
14259 return VERR_NOT_IMPLEMENTED;
14260}
14261
14262
14263VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14264{
14265 return VERR_NOT_IMPLEMENTED;
14266}
14267
14268
14269#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14270/**
14271 * Executes an IRET instruction with the default operand size.
14272 *
14273 * This is for PATM.
14274 *
14275 * @returns VBox status code.
14276 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14277 * @param pCtxCore The register frame.
14278 */
14279VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14280{
14281 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14282
14283 iemCtxCoreToCtx(pCtx, pCtxCore);
14284 iemInitDecoder(pVCpu);
14285 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14286 if (rcStrict == VINF_SUCCESS)
14287 iemCtxToCtxCore(pCtxCore, pCtx);
14288 else
14289 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14290 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14291 return rcStrict;
14292}
14293#endif
14294
14295
14296/**
14297 * Macro used by the IEMExec* methods to check the given instruction length.
14298 *
14299 * Will return on failure!
14300 *
14301 * @param a_cbInstr The given instruction length.
14302 * @param a_cbMin The minimum length.
14303 */
14304#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14305 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14306 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14307
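/*
 * Illustration only: the single unsigned comparison above covers both bounds.
 * With a_cbMin = 2 (as used by the CRx interfaces below), the right-hand side
 * is 13 and:
 *
 *      a_cbInstr = 1   -> 1 - 2 wraps to UINT_MAX, which is > 13  -> assert + return
 *      a_cbInstr = 2   -> 0  <= 13                                -> OK
 *      a_cbInstr = 15  -> 13 <= 13                                -> OK
 *      a_cbInstr = 16  -> 14 >  13                                -> assert + return
 *
 * i.e. it enforces a_cbMin <= a_cbInstr <= 15 without a second branch.
 */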
14308
14309/**
14310 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14311 *
14312 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14313 *
14314 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14316 * @param rcStrict The status code to fiddle.
14317 */
14318DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14319{
14320 iemUninitExec(pVCpu);
14321#ifdef IN_RC
14322 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14323 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14324#else
14325 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14326#endif
14327}
14328
14329
14330/**
14331 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14332 *
14333 * This API ASSUMES that the caller has already verified that the guest code is
14334 * allowed to access the I/O port. (The I/O port is in the DX register in the
14335 * guest state.)
14336 *
14337 * @returns Strict VBox status code.
14338 * @param pVCpu The cross context virtual CPU structure.
14339 * @param cbValue The size of the I/O port access (1, 2, or 4).
14340 * @param enmAddrMode The addressing mode.
14341 * @param fRepPrefix Indicates whether a repeat prefix is used
14342 * (doesn't matter which for this instruction).
14343 * @param cbInstr The instruction length in bytes.
14344 * @param iEffSeg The effective segment register number.
14345 * @param fIoChecked Whether the access to the I/O port has been
14346 * checked or not. It's typically checked in the
14347 * HM scenario.
14348 */
14349VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14350 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14351{
14352 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14353 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14354
14355 /*
14356 * State init.
14357 */
14358 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14359
14360 /*
14361 * Switch orgy for getting to the right handler.
14362 */
14363 VBOXSTRICTRC rcStrict;
14364 if (fRepPrefix)
14365 {
14366 switch (enmAddrMode)
14367 {
14368 case IEMMODE_16BIT:
14369 switch (cbValue)
14370 {
14371 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14372 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14373 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14374 default:
14375 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14376 }
14377 break;
14378
14379 case IEMMODE_32BIT:
14380 switch (cbValue)
14381 {
14382 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14383 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14384 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14385 default:
14386 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14387 }
14388 break;
14389
14390 case IEMMODE_64BIT:
14391 switch (cbValue)
14392 {
14393 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14394 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14395 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14396 default:
14397 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14398 }
14399 break;
14400
14401 default:
14402 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14403 }
14404 }
14405 else
14406 {
14407 switch (enmAddrMode)
14408 {
14409 case IEMMODE_16BIT:
14410 switch (cbValue)
14411 {
14412 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14413 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14414 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14415 default:
14416 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14417 }
14418 break;
14419
14420 case IEMMODE_32BIT:
14421 switch (cbValue)
14422 {
14423 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14424 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14425 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14426 default:
14427 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14428 }
14429 break;
14430
14431 case IEMMODE_64BIT:
14432 switch (cbValue)
14433 {
14434 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14435 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14436 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14437 default:
14438 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14439 }
14440 break;
14441
14442 default:
14443 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14444 }
14445 }
14446
14447 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14448}
14449
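/*
 * Illustration only (not compiled): a hypothetical HM-style caller for a
 * "rep outsb" exit.  Port access verification is assumed to have been done
 * already, as the API requires; the concrete argument values are invented.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
 *                                                   1,              // cbValue: byte-sized OUTSB
 *                                                   IEMMODE_32BIT,  // guest address mode
 *                                                   true,           // fRepPrefix
 *                                                   2,              // cbInstr (hypothetical)
 *                                                   X86_SREG_DS,    // iEffSeg: default segment for OUTS
 *                                                   true);          // fIoChecked
 */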
14450
14451/**
14452 * Interface for HM and EM for executing string I/O IN (read) instructions.
14453 *
14454 * This API ASSUMES that the caller has already verified that the guest code is
14455 * allowed to access the I/O port. (The I/O port is in the DX register in the
14456 * guest state.)
14457 *
14458 * @returns Strict VBox status code.
14459 * @param pVCpu The cross context virtual CPU structure.
14460 * @param cbValue The size of the I/O port access (1, 2, or 4).
14461 * @param enmAddrMode The addressing mode.
14462 * @param fRepPrefix Indicates whether a repeat prefix is used
14463 * (doesn't matter which for this instruction).
14464 * @param cbInstr The instruction length in bytes.
14465 * @param fIoChecked Whether the access to the I/O port has been
14466 * checked or not. It's typically checked in the
14467 * HM scenario.
14468 */
14469VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14470 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14471{
14472 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14473
14474 /*
14475 * State init.
14476 */
14477 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14478
14479 /*
14480 * Switch orgy for getting to the right handler.
14481 */
14482 VBOXSTRICTRC rcStrict;
14483 if (fRepPrefix)
14484 {
14485 switch (enmAddrMode)
14486 {
14487 case IEMMODE_16BIT:
14488 switch (cbValue)
14489 {
14490 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14491 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14492 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14493 default:
14494 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14495 }
14496 break;
14497
14498 case IEMMODE_32BIT:
14499 switch (cbValue)
14500 {
14501 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14502 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14503 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14504 default:
14505 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14506 }
14507 break;
14508
14509 case IEMMODE_64BIT:
14510 switch (cbValue)
14511 {
14512 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14513 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14514 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14515 default:
14516 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14517 }
14518 break;
14519
14520 default:
14521 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14522 }
14523 }
14524 else
14525 {
14526 switch (enmAddrMode)
14527 {
14528 case IEMMODE_16BIT:
14529 switch (cbValue)
14530 {
14531 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14532 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14533 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14534 default:
14535 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14536 }
14537 break;
14538
14539 case IEMMODE_32BIT:
14540 switch (cbValue)
14541 {
14542 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14543 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14544 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14545 default:
14546 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14547 }
14548 break;
14549
14550 case IEMMODE_64BIT:
14551 switch (cbValue)
14552 {
14553 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14554 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14555 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14556 default:
14557 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14558 }
14559 break;
14560
14561 default:
14562 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14563 }
14564 }
14565
14566 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14567}
14568
14569
14570/**
14571 * Interface for raw-mode to execute an OUT (write) instruction.
14572 *
14573 * @returns Strict VBox status code.
14574 * @param pVCpu The cross context virtual CPU structure.
14575 * @param cbInstr The instruction length in bytes.
14576 * @param u16Port The port to write to.
14577 * @param cbReg The register size.
14578 *
14579 * @remarks In ring-0 not all of the state needs to be synced in.
14580 */
14581VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14582{
14583 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14584 Assert(cbReg <= 4 && cbReg != 3);
14585
14586 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14587 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14588 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14589}
14590
14591
14592/**
14593 * Interface for raw-mode to execute an IN (read) instruction.
14594 *
14595 * @returns Strict VBox status code.
14596 * @param pVCpu The cross context virtual CPU structure.
14597 * @param cbInstr The instruction length in bytes.
14598 * @param u16Port The port to read.
14599 * @param cbReg The register size.
14600 */
14601VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14602{
14603 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14604 Assert(cbReg <= 4 && cbReg != 3);
14605
14606 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14607 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14608 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14609}
14610
14611
14612/**
14613 * Interface for HM and EM to write to a CRx register.
14614 *
14615 * @returns Strict VBox status code.
14616 * @param pVCpu The cross context virtual CPU structure.
14617 * @param cbInstr The instruction length in bytes.
14618 * @param iCrReg The control register number (destination).
14619 * @param iGReg The general purpose register number (source).
14620 *
14621 * @remarks In ring-0 not all of the state needs to be synced in.
14622 */
14623VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14624{
14625 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14626 Assert(iCrReg < 16);
14627 Assert(iGReg < 16);
14628
14629 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14630 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14631 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14632}
14633
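/*
 * Illustration only (not compiled): a hypothetical caller handling a
 * "mov cr3, rax" intercept.  The CR/GPR numbers are the x86 encodings; the
 * instruction length comes from the real exit information in practice.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu,
 *                                                        3,   // cbInstr
 *                                                        3,   // iCrReg: CR3
 *                                                        0);  // iGReg: RAX
 */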
14634
14635/**
14636 * Interface for HM and EM to read from a CRx register.
14637 *
14638 * @returns Strict VBox status code.
14639 * @param pVCpu The cross context virtual CPU structure.
14640 * @param cbInstr The instruction length in bytes.
14641 * @param iGReg The general purpose register number (destination).
14642 * @param iCrReg The control register number (source).
14643 *
14644 * @remarks In ring-0 not all of the state needs to be synced in.
14645 */
14646VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14647{
14648 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14649 Assert(iCrReg < 16);
14650 Assert(iGReg < 16);
14651
14652 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14653 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14654 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14655}
14656
14657
14658/**
14659 * Interface for HM and EM to clear the CR0[TS] bit.
14660 *
14661 * @returns Strict VBox status code.
14662 * @param pVCpu The cross context virtual CPU structure.
14663 * @param cbInstr The instruction length in bytes.
14664 *
14665 * @remarks In ring-0 not all of the state needs to be synced in.
14666 */
14667VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14668{
14669 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14670
14671 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14672 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14673 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14674}
14675
14676
14677/**
14678 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14679 *
14680 * @returns Strict VBox status code.
14681 * @param pVCpu The cross context virtual CPU structure.
14682 * @param cbInstr The instruction length in bytes.
14683 * @param uValue The value to load into CR0.
14684 *
14685 * @remarks In ring-0 not all of the state needs to be synced in.
14686 */
14687VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14688{
14689 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14690
14691 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14692 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14693 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14694}
14695
14696
14697/**
14698 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14699 *
14700 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14701 *
14702 * @returns Strict VBox status code.
14703 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14704 * @param cbInstr The instruction length in bytes.
14705 * @remarks In ring-0 not all of the state needs to be synced in.
14706 * @thread EMT(pVCpu)
14707 */
14708VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14709{
14710 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14711
14712 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14713 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14714 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14715}
14716
14717#ifdef IN_RING3
14718
14719/**
14720 * Handles the unlikely and probably fatal merge cases.
14721 *
14722 * @returns Merged status code.
14723 * @param rcStrict Current EM status code.
14724 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14725 * with @a rcStrict.
14726 * @param iMemMap The memory mapping index. For error reporting only.
14727 * @param pVCpu The cross context virtual CPU structure of the calling
14728 * thread, for error reporting only.
14729 */
14730DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14731 unsigned iMemMap, PVMCPU pVCpu)
14732{
14733 if (RT_FAILURE_NP(rcStrict))
14734 return rcStrict;
14735
14736 if (RT_FAILURE_NP(rcStrictCommit))
14737 return rcStrictCommit;
14738
14739 if (rcStrict == rcStrictCommit)
14740 return rcStrictCommit;
14741
14742 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14743 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14744 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14745 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14746 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14747 return VERR_IOM_FF_STATUS_IPE;
14748}
14749
14750
14751/**
14752 * Helper for IOMR3ProcessForceFlag.
14753 *
14754 * @returns Merged status code.
14755 * @param rcStrict Current EM status code.
14756 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14757 * with @a rcStrict.
14758 * @param iMemMap The memory mapping index. For error reporting only.
14759 * @param pVCpu The cross context virtual CPU structure of the calling
14760 * thread, for error reporting only.
14761 */
14762DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14763{
14764 /* Simple. */
14765 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14766 return rcStrictCommit;
14767
14768 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14769 return rcStrict;
14770
14771 /* EM scheduling status codes. */
14772 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14773 && rcStrict <= VINF_EM_LAST))
14774 {
14775 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14776 && rcStrictCommit <= VINF_EM_LAST))
14777 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14778 }
14779
14780 /* Unlikely */
14781 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14782}
14783
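/*
 * Illustration only: how the merge above behaves for a few concrete inputs
 * (the first two lines are the fast paths, the third is the EM-range rule).
 *
 *      rcStrict = VINF_SUCCESS, rcStrictCommit = VINF_EM_RAW_TO_R3   -> VINF_EM_RAW_TO_R3
 *      rcStrict = VINF_EM_HALT, rcStrictCommit = VINF_SUCCESS        -> VINF_EM_HALT
 *      both in [VINF_EM_FIRST, VINF_EM_LAST]                         -> the numerically lower (higher priority) one
 *      anything else                                                 -> iemR3MergeStatusSlow
 */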
14784
14785/**
14786 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14787 *
14788 * @returns Merge between @a rcStrict and what the commit operation returned.
14789 * @param pVM The cross context VM structure.
14790 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14791 * @param rcStrict The status code returned by ring-0 or raw-mode.
14792 */
14793VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14794{
14795 /*
14796 * Reset the pending commit.
14797 */
14798 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14799 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14800 ("%#x %#x %#x\n",
14801 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14802 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14803
14804 /*
14805 * Commit the pending bounce buffers (usually just one).
14806 */
14807 unsigned cBufs = 0;
14808 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14809 while (iMemMap-- > 0)
14810 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14811 {
14812 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14813 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14814 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14815
14816 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14817 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14818 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14819
14820 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14821 {
14822 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14824 pbBuf,
14825 cbFirst,
14826 PGMACCESSORIGIN_IEM);
14827 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14828 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14829 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14830 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14831 }
14832
14833 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14834 {
14835 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14836 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14837 pbBuf + cbFirst,
14838 cbSecond,
14839 PGMACCESSORIGIN_IEM);
14840 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14841 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14842 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14843 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14844 }
14845 cBufs++;
14846 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14847 }
14848
14849 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14850 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14851 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14852 pVCpu->iem.s.cActiveMappings = 0;
14853 return rcStrict;
14854}
14855
14856#endif /* IN_RING3 */
14857