VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@45907

Last change on this file since 45907 was 45701, checked in by vboxsync, 12 years ago

VMM: SELM and VMM early HM init changes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 313.6 KB
1/* $Id: IEMAll.cpp 45701 2013-04-24 14:21:09Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
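 *
 * @section sec_iem_example Example Invocation
 *
 * A minimal, illustrative sketch of how a caller hands a single instruction to
 * IEM (assuming the IEMExecOne API declared in VBox/vmm/iem.h); the strict
 * status code is simply handed back for EM to act on:
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          return VBOXSTRICTRC_TODO(rcStrict); /* let EM handle scheduling codes */
 * @endcode
 *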
71 */
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78
79/*******************************************************************************
80* Header Files *
81*******************************************************************************/
82#define LOG_GROUP LOG_GROUP_IEM
83#include <VBox/vmm/iem.h>
84#include <VBox/vmm/cpum.h>
85#include <VBox/vmm/pgm.h>
86#include <internal/pgm.h>
87#include <VBox/vmm/iom.h>
88#include <VBox/vmm/em.h>
89#include <VBox/vmm/hm.h>
90#include <VBox/vmm/tm.h>
91#include <VBox/vmm/dbgf.h>
92#ifdef VBOX_WITH_RAW_MODE_NOT_R0
93# include <VBox/vmm/patm.h>
94#endif
95#include "IEMInternal.h"
96#ifdef IEM_VERIFICATION_MODE_FULL
97# include <VBox/vmm/rem.h>
98# include <VBox/vmm/mm.h>
99#endif
100#include <VBox/vmm/vm.h>
101#include <VBox/log.h>
102#include <VBox/err.h>
103#include <VBox/param.h>
104#include <iprt/assert.h>
105#include <iprt/string.h>
106#include <iprt/x86.h>
107
108
109/*******************************************************************************
110* Structures and Typedefs *
111*******************************************************************************/
112/** @typedef PFNIEMOP
113 * Pointer to an opcode decoder function.
114 */
115
116/** @def FNIEMOP_DEF
117 * Define an opcode decoder function.
118 *
119 * We're using macros for this so that adding and removing parameters as well as
120 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
121 *
122 * @param a_Name The function name.
123 */
124
125
126#if defined(__GNUC__) && defined(RT_ARCH_X86)
127typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
128# define FNIEMOP_DEF(a_Name) \
129 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
130# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
131 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
132# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
133 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
134
135#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
136typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
137# define FNIEMOP_DEF(a_Name) \
138 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
139# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
140 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
141# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
142 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
143
144#elif defined(__GNUC__)
145typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
146# define FNIEMOP_DEF(a_Name) \
147 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
148# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
149 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
150# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
151 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
152
153#else
154typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
155# define FNIEMOP_DEF(a_Name) \
156 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
161
162#endif
163
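/*
 * Illustrative sketch only -- iemOp_example is a hypothetical name, not part of
 * the real opcode tables. Decoder functions are always defined via the
 * FNIEMOP_DEF_* macros above and invoked via the matching FNIEMOP_CALL_*
 * macros defined further down, so the calling convention stays in one place:
 *
 *      FNIEMOP_DEF_1(iemOp_example, uint8_t, bRm)
 *      {
 *          NOREF(bRm);
 *          return VINF_SUCCESS;
 *      }
 *
 *      // at the call site, inside another FNIEMOP_DEF function:
 *      return FNIEMOP_CALL_1(iemOp_example, bRm);
 */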
164
165/**
166 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
167 */
168typedef union IEMSELDESC
169{
170 /** The legacy view. */
171 X86DESC Legacy;
172 /** The long mode view. */
173 X86DESC64 Long;
174} IEMSELDESC;
175/** Pointer to a selector descriptor table entry. */
176typedef IEMSELDESC *PIEMSELDESC;
177
178
179/*******************************************************************************
180* Defined Constants And Macros *
181*******************************************************************************/
182/** @name IEM status codes.
183 *
184 * Not quite sure how this will play out in the end, just aliasing safe status
185 * codes for now.
186 *
187 * @{ */
188#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
189/** @} */
190
191/** Temporary hack to disable the double execution. Will be removed in favor
192 * of a dedicated execution mode in EM. */
193//#define IEM_VERIFICATION_MODE_NO_REM
194
195/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
196 * due to GCC lacking knowledge about the value range of a switch. */
197#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
198
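/* Typical use (illustrative; cbValue would be a local of the caller): closes an
   exhaustive switch over an enum so GCC can see that every reachable case
   assigns the variable:
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT: cbValue = 2; break;
            case IEMMODE_32BIT: cbValue = 4; break;
            case IEMMODE_64BIT: cbValue = 8; break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
 */
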
199/**
200 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
201 * occasion.
202 */
203#ifdef LOG_ENABLED
204# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
205 do { \
206 Log(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
207 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
208 } while (0)
209#else
210# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
212#endif
213
214/**
215 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
216 * occasion using the supplied logger statement.
217 *
218 * @param a_LoggerArgs What to log on failure.
219 */
220#ifdef LOG_ENABLED
221# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
222 do { \
223 LogFunc(a_LoggerArgs); \
224 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
225 } while (0)
226#else
227# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
228 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
229#endif
230
231/**
232 * Call an opcode decoder function.
233 *
234 * We're using macros for this so that adding and removing parameters can be
235 * done as we please. See FNIEMOP_DEF.
236 */
237#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
238
239/**
240 * Call a common opcode decoder function taking one extra argument.
241 *
242 * We're using macros for this so that adding and removing parameters can be
243 * done as we please. See FNIEMOP_DEF_1.
244 */
245#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
246
247/**
248 * Call a common opcode decoder function taking two extra arguments.
249 *
250 * We're using macros for this so that adding and removing parameters can be
251 * done as we please. See FNIEMOP_DEF_2.
252 */
253#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
254
255/**
256 * Check if we're currently executing in real or virtual 8086 mode.
257 *
258 * @returns @c true if it is, @c false if not.
259 * @param a_pIemCpu The IEM state of the current CPU.
260 */
261#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
262
263/**
264 * Check if we're currently executing in long mode.
265 *
266 * @returns @c true if it is, @c false if not.
267 * @param a_pIemCpu The IEM state of the current CPU.
268 */
269#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
270
271/**
272 * Check if we're currently executing in real mode.
273 *
274 * @returns @c true if it is, @c false if not.
275 * @param a_pIemCpu The IEM state of the current CPU.
276 */
277#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
278
279/**
280 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
281 */
282#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
283
284/**
285 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
286 */
287#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
288
289/**
290 * Tests if at least one of the specified AMD CPUID features (extended) is
291 * marked present.
292 */
293#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
294
295/**
296 * Checks if an Intel CPUID feature is present.
297 */
298#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
299 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
300 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
301
302/**
303 * Evaluates to true if we're presenting an Intel CPU to the guest.
304 */
305#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) (true) /** @todo determine this once and store it in the CPU structure */
306
307/**
308 * Evaluates to true if we're presenting an AMD CPU to the guest.
309 */
310#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) (false) /** @todo determine this once and store it in the CPU structure */
311
312/**
313 * Check if the address is canonical.
314 */
315#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
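
/* Explanatory note: the addition above folds both canonical halves of the
   48-bit address space into a single unsigned compare. Adding 2^47 maps
   0..0x00007FFFFFFFFFFF and 0xFFFF800000000000..0xFFFFFFFFFFFFFFFF to values
   below 2^48, while any non-canonical address ends up at 2^48 or above. */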
316
317
318/*******************************************************************************
319* Global Variables *
320*******************************************************************************/
321extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
322
323
324/** Function table for the ADD instruction. */
325static const IEMOPBINSIZES g_iemAImpl_add =
326{
327 iemAImpl_add_u8, iemAImpl_add_u8_locked,
328 iemAImpl_add_u16, iemAImpl_add_u16_locked,
329 iemAImpl_add_u32, iemAImpl_add_u32_locked,
330 iemAImpl_add_u64, iemAImpl_add_u64_locked
331};
332
333/** Function table for the ADC instruction. */
334static const IEMOPBINSIZES g_iemAImpl_adc =
335{
336 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
337 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
338 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
339 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
340};
341
342/** Function table for the SUB instruction. */
343static const IEMOPBINSIZES g_iemAImpl_sub =
344{
345 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
346 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
347 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
348 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
349};
350
351/** Function table for the SBB instruction. */
352static const IEMOPBINSIZES g_iemAImpl_sbb =
353{
354 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
355 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
356 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
357 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
358};
359
360/** Function table for the OR instruction. */
361static const IEMOPBINSIZES g_iemAImpl_or =
362{
363 iemAImpl_or_u8, iemAImpl_or_u8_locked,
364 iemAImpl_or_u16, iemAImpl_or_u16_locked,
365 iemAImpl_or_u32, iemAImpl_or_u32_locked,
366 iemAImpl_or_u64, iemAImpl_or_u64_locked
367};
368
369/** Function table for the XOR instruction. */
370static const IEMOPBINSIZES g_iemAImpl_xor =
371{
372 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
373 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
374 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
375 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
376};
377
378/** Function table for the AND instruction. */
379static const IEMOPBINSIZES g_iemAImpl_and =
380{
381 iemAImpl_and_u8, iemAImpl_and_u8_locked,
382 iemAImpl_and_u16, iemAImpl_and_u16_locked,
383 iemAImpl_and_u32, iemAImpl_and_u32_locked,
384 iemAImpl_and_u64, iemAImpl_and_u64_locked
385};
386
387/** Function table for the CMP instruction.
388 * @remarks Making operand order ASSUMPTIONS.
389 */
390static const IEMOPBINSIZES g_iemAImpl_cmp =
391{
392 iemAImpl_cmp_u8, NULL,
393 iemAImpl_cmp_u16, NULL,
394 iemAImpl_cmp_u32, NULL,
395 iemAImpl_cmp_u64, NULL
396};
397
398/** Function table for the TEST instruction.
399 * @remarks Making operand order ASSUMPTIONS.
400 */
401static const IEMOPBINSIZES g_iemAImpl_test =
402{
403 iemAImpl_test_u8, NULL,
404 iemAImpl_test_u16, NULL,
405 iemAImpl_test_u32, NULL,
406 iemAImpl_test_u64, NULL
407};
408
409/** Function table for the BT instruction. */
410static const IEMOPBINSIZES g_iemAImpl_bt =
411{
412 NULL, NULL,
413 iemAImpl_bt_u16, NULL,
414 iemAImpl_bt_u32, NULL,
415 iemAImpl_bt_u64, NULL
416};
417
418/** Function table for the BTC instruction. */
419static const IEMOPBINSIZES g_iemAImpl_btc =
420{
421 NULL, NULL,
422 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
423 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
424 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
425};
426
427/** Function table for the BTR instruction. */
428static const IEMOPBINSIZES g_iemAImpl_btr =
429{
430 NULL, NULL,
431 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
432 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
433 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
434};
435
436/** Function table for the BTS instruction. */
437static const IEMOPBINSIZES g_iemAImpl_bts =
438{
439 NULL, NULL,
440 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
441 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
442 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
443};
444
445/** Function table for the BSF instruction. */
446static const IEMOPBINSIZES g_iemAImpl_bsf =
447{
448 NULL, NULL,
449 iemAImpl_bsf_u16, NULL,
450 iemAImpl_bsf_u32, NULL,
451 iemAImpl_bsf_u64, NULL
452};
453
454/** Function table for the BSR instruction. */
455static const IEMOPBINSIZES g_iemAImpl_bsr =
456{
457 NULL, NULL,
458 iemAImpl_bsr_u16, NULL,
459 iemAImpl_bsr_u32, NULL,
460 iemAImpl_bsr_u64, NULL
461};
462
463/** Function table for the IMUL instruction. */
464static const IEMOPBINSIZES g_iemAImpl_imul_two =
465{
466 NULL, NULL,
467 iemAImpl_imul_two_u16, NULL,
468 iemAImpl_imul_two_u32, NULL,
469 iemAImpl_imul_two_u64, NULL
470};
471
472/** Group 1 /r lookup table. */
473static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
474{
475 &g_iemAImpl_add,
476 &g_iemAImpl_or,
477 &g_iemAImpl_adc,
478 &g_iemAImpl_sbb,
479 &g_iemAImpl_and,
480 &g_iemAImpl_sub,
481 &g_iemAImpl_xor,
482 &g_iemAImpl_cmp
483};
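
/* Example: for the 0x80/0x81/0x83 group-1 opcodes the ModR/M reg field indexes
   this table directly, so /0 selects ADD, /5 selects SUB and /7 selects CMP,
   matching the entry order above. */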
484
485/** Function table for the INC instruction. */
486static const IEMOPUNARYSIZES g_iemAImpl_inc =
487{
488 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
489 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
490 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
491 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
492};
493
494/** Function table for the DEC instruction. */
495static const IEMOPUNARYSIZES g_iemAImpl_dec =
496{
497 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
498 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
499 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
500 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
501};
502
503/** Function table for the NEG instruction. */
504static const IEMOPUNARYSIZES g_iemAImpl_neg =
505{
506 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
507 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
508 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
509 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
510};
511
512/** Function table for the NOT instruction. */
513static const IEMOPUNARYSIZES g_iemAImpl_not =
514{
515 iemAImpl_not_u8, iemAImpl_not_u8_locked,
516 iemAImpl_not_u16, iemAImpl_not_u16_locked,
517 iemAImpl_not_u32, iemAImpl_not_u32_locked,
518 iemAImpl_not_u64, iemAImpl_not_u64_locked
519};
520
521
522/** Function table for the ROL instruction. */
523static const IEMOPSHIFTSIZES g_iemAImpl_rol =
524{
525 iemAImpl_rol_u8,
526 iemAImpl_rol_u16,
527 iemAImpl_rol_u32,
528 iemAImpl_rol_u64
529};
530
531/** Function table for the ROR instruction. */
532static const IEMOPSHIFTSIZES g_iemAImpl_ror =
533{
534 iemAImpl_ror_u8,
535 iemAImpl_ror_u16,
536 iemAImpl_ror_u32,
537 iemAImpl_ror_u64
538};
539
540/** Function table for the RCL instruction. */
541static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
542{
543 iemAImpl_rcl_u8,
544 iemAImpl_rcl_u16,
545 iemAImpl_rcl_u32,
546 iemAImpl_rcl_u64
547};
548
549/** Function table for the RCR instruction. */
550static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
551{
552 iemAImpl_rcr_u8,
553 iemAImpl_rcr_u16,
554 iemAImpl_rcr_u32,
555 iemAImpl_rcr_u64
556};
557
558/** Function table for the SHL instruction. */
559static const IEMOPSHIFTSIZES g_iemAImpl_shl =
560{
561 iemAImpl_shl_u8,
562 iemAImpl_shl_u16,
563 iemAImpl_shl_u32,
564 iemAImpl_shl_u64
565};
566
567/** Function table for the SHR instruction. */
568static const IEMOPSHIFTSIZES g_iemAImpl_shr =
569{
570 iemAImpl_shr_u8,
571 iemAImpl_shr_u16,
572 iemAImpl_shr_u32,
573 iemAImpl_shr_u64
574};
575
576/** Function table for the SAR instruction. */
577static const IEMOPSHIFTSIZES g_iemAImpl_sar =
578{
579 iemAImpl_sar_u8,
580 iemAImpl_sar_u16,
581 iemAImpl_sar_u32,
582 iemAImpl_sar_u64
583};
584
585
586/** Function table for the MUL instruction. */
587static const IEMOPMULDIVSIZES g_iemAImpl_mul =
588{
589 iemAImpl_mul_u8,
590 iemAImpl_mul_u16,
591 iemAImpl_mul_u32,
592 iemAImpl_mul_u64
593};
594
595/** Function table for the IMUL instruction working implicitly on rAX. */
596static const IEMOPMULDIVSIZES g_iemAImpl_imul =
597{
598 iemAImpl_imul_u8,
599 iemAImpl_imul_u16,
600 iemAImpl_imul_u32,
601 iemAImpl_imul_u64
602};
603
604/** Function table for the DIV instruction. */
605static const IEMOPMULDIVSIZES g_iemAImpl_div =
606{
607 iemAImpl_div_u8,
608 iemAImpl_div_u16,
609 iemAImpl_div_u32,
610 iemAImpl_div_u64
611};
612
613/** Function table for the IDIV instruction. */
614static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
615{
616 iemAImpl_idiv_u8,
617 iemAImpl_idiv_u16,
618 iemAImpl_idiv_u32,
619 iemAImpl_idiv_u64
620};
621
622/** Function table for the SHLD instruction. */
623static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
624{
625 iemAImpl_shld_u16,
626 iemAImpl_shld_u32,
627 iemAImpl_shld_u64,
628};
629
630/** Function table for the SHRD instruction. */
631static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
632{
633 iemAImpl_shrd_u16,
634 iemAImpl_shrd_u32,
635 iemAImpl_shrd_u64,
636};
637
638
639#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
640/** What IEM just wrote. */
641uint8_t g_abIemWrote[256];
642/** How much IEM just wrote. */
643size_t g_cbIemWrote;
644#endif
645
646
647/*******************************************************************************
648* Internal Functions *
649*******************************************************************************/
650static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
651/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
652static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
653static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
654static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
655static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
656static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
657static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
658static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
659static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
660static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
661static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
662static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
663static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
664static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
665static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
666static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
667static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
668static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
669static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
670static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
671static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
672static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
673static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
674
675#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
676static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
677#endif
678static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
679static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
680
681
682/**
683 * Sets the pass up status.
684 *
685 * @returns VINF_SUCCESS.
686 * @param pIemCpu The per CPU IEM state of the calling thread.
687 * @param rcPassUp The pass up status. Must be informational.
688 * VINF_SUCCESS is not allowed.
689 */
690static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
691{
692 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
693
694 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
695 if (rcOldPassUp == VINF_SUCCESS)
696 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
697 /* If both are EM scheduling code, use EM priority rules. */
698 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
699 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
700 {
701 if (rcPassUp < rcOldPassUp)
702 {
703 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
704 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
705 }
706 else
707 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
708 }
709 /* Override EM scheduling with specific status code. */
710 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
711 {
712 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
713 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
714 }
715 /* Don't override specific status code, first come first served. */
716 else
717 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
718 return VINF_SUCCESS;
719}
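
/* Illustrative use (a sketch, not a specific call site): a helper that must not
   lose an informational status while still letting the instruction complete
   would do
        if (rcStrict != VINF_SUCCESS)
            rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
   and the recorded code is folded into the status IEM eventually returns. */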
720
721
722/**
723 * Initializes the decoder state.
724 *
725 * @param pIemCpu The per CPU IEM state.
726 * @param fBypassHandlers Whether to bypass access handlers.
727 */
728DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
729{
730 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
731 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
732
733#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
734 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
735 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
736 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
737 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
738 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
739 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
740 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
741 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
742#endif
743
744#ifdef VBOX_WITH_RAW_MODE_NOT_R0
745 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
746#endif
747 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
748#ifdef IEM_VERIFICATION_MODE_FULL
749 if (pIemCpu->uInjectCpl != UINT8_MAX)
750 pIemCpu->uCpl = pIemCpu->uInjectCpl;
751#endif
752 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
753 ? IEMMODE_64BIT
754 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
755 ? IEMMODE_32BIT
756 : IEMMODE_16BIT;
757 pIemCpu->enmCpuMode = enmMode;
758 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
759 pIemCpu->enmEffAddrMode = enmMode;
760 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
761 pIemCpu->enmEffOpSize = enmMode;
762 pIemCpu->fPrefixes = 0;
763 pIemCpu->uRexReg = 0;
764 pIemCpu->uRexB = 0;
765 pIemCpu->uRexIndex = 0;
766 pIemCpu->iEffSeg = X86_SREG_DS;
767 pIemCpu->offOpcode = 0;
768 pIemCpu->cbOpcode = 0;
769 pIemCpu->cActiveMappings = 0;
770 pIemCpu->iNextMapping = 0;
771 pIemCpu->rcPassUp = VINF_SUCCESS;
772 pIemCpu->fBypassHandlers = fBypassHandlers;
773
774}
775
776
777/**
778 * Prefetches opcodes the first time, i.e. when starting to execute.
779 *
780 * @returns Strict VBox status code.
781 * @param pIemCpu The IEM state.
782 * @param fBypassHandlers Whether to bypass access handlers.
783 */
784static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
785{
786#ifdef IEM_VERIFICATION_MODE_FULL
787 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
788#endif
789 iemInitDecoder(pIemCpu, fBypassHandlers);
790
791 /*
792 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
793 *
794 * First translate CS:rIP to a physical address.
795 */
796 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
797 uint32_t cbToTryRead;
798 RTGCPTR GCPtrPC;
799 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
800 {
801 cbToTryRead = PAGE_SIZE;
802 GCPtrPC = pCtx->rip;
803 if (!IEM_IS_CANONICAL(GCPtrPC))
804 return iemRaiseGeneralProtectionFault0(pIemCpu);
805 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
806 }
807 else
808 {
809 uint32_t GCPtrPC32 = pCtx->eip;
810 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
811 if (GCPtrPC32 > pCtx->cs.u32Limit)
812 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
813 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
814 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
815 }
816
817#if defined(IN_RC) && defined(VBOX_WITH_RAW_MODE)
818 /* Allow interpretation of patch manager code blocks since they can for
819 instance throw #PFs for perfectly good reasons. */
820 if ( (pCtx->cs.Sel & X86_SEL_RPL) == 1
821 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), GCPtrPC))
822 {
823 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
824 if (cbToTryRead > cbLeftOnPage)
825 cbToTryRead = cbLeftOnPage;
826 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
827 cbToTryRead = sizeof(pIemCpu->abOpcode);
828 memcpy(pIemCpu->abOpcode, (void const *)(uintptr_t)GCPtrPC, cbToTryRead);
829 pIemCpu->cbOpcode = cbToTryRead;
830 return VINF_SUCCESS;
831 }
832#endif
833
834 RTGCPHYS GCPhys;
835 uint64_t fFlags;
836 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
837 if (RT_FAILURE(rc))
838 {
839 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
840 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
841 }
842 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
843 {
844 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
845 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
846 }
847 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
848 {
849 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
850 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
851 }
852 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
853 /** @todo Check reserved bits and such stuff. PGM is better at doing
854 * that, so do it when implementing the guest virtual address
855 * TLB... */
856
857#ifdef IEM_VERIFICATION_MODE_FULL
858 /*
859 * Optimistic optimization: Use unconsumed opcode bytes from the previous
860 * instruction.
861 */
862 /** @todo optimize this differently by not using PGMPhysRead. */
863 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
864 pIemCpu->GCPhysOpcodes = GCPhys;
865 if ( offPrevOpcodes < cbOldOpcodes
866 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
867 {
868 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
869 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
870 pIemCpu->cbOpcode = cbNew;
871 return VINF_SUCCESS;
872 }
873#endif
874
875 /*
876 * Read the bytes at this address.
877 */
878 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
879 if (cbToTryRead > cbLeftOnPage)
880 cbToTryRead = cbLeftOnPage;
881 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
882 cbToTryRead = sizeof(pIemCpu->abOpcode);
883 /** @todo PATM: Read original, unpatched bytes? EMAll.cpp doesn't seem to be
884 * doing that. */
885 if (!pIemCpu->fBypassHandlers)
886 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
887 else
888 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
889 if (rc != VINF_SUCCESS)
890 {
891 /** @todo status code handling */
892 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
893 GCPtrPC, GCPhys, cbToTryRead, rc));
894 return rc;
895 }
896 pIemCpu->cbOpcode = cbToTryRead;
897
898 return VINF_SUCCESS;
899}
900
901
902/**
903 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
904 * exception if it fails.
905 *
906 * @returns Strict VBox status code.
907 * @param pIemCpu The IEM state.
908 * @param cbMin The minimum number of additional opcode bytes needed.
909 */
910static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
911{
912 /*
913 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
914 *
915 * First translate CS:rIP to a physical address.
916 */
917 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
918 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
919 uint32_t cbToTryRead;
920 RTGCPTR GCPtrNext;
921 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
922 {
923 cbToTryRead = PAGE_SIZE;
924 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
925 if (!IEM_IS_CANONICAL(GCPtrNext))
926 return iemRaiseGeneralProtectionFault0(pIemCpu);
927 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
928 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
929 }
930 else
931 {
932 uint32_t GCPtrNext32 = pCtx->eip;
933 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
934 GCPtrNext32 += pIemCpu->cbOpcode;
935 if (GCPtrNext32 > pCtx->cs.u32Limit)
936 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
937 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
938 if (cbToTryRead < cbMin - cbLeft)
939 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
940 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
941 }
942
943 RTGCPHYS GCPhys;
944 uint64_t fFlags;
945 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
946 if (RT_FAILURE(rc))
947 {
948 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
949 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
950 }
951 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
952 {
953 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
954 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
955 }
956 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
957 {
958 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
959 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
960 }
961 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
962 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
963 /** @todo Check reserved bits and such stuff. PGM is better at doing
964 * that, so do it when implementing the guest virtual address
965 * TLB... */
966
967 /*
968 * Read the bytes at this address.
969 */
970 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
971 if (cbToTryRead > cbLeftOnPage)
972 cbToTryRead = cbLeftOnPage;
973 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
974 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
975 Assert(cbToTryRead >= cbMin - cbLeft);
976 if (!pIemCpu->fBypassHandlers)
977 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
978 else
979 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
980 if (rc != VINF_SUCCESS)
981 {
982 /** @todo status code handling */
983 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
984 return rc;
985 }
986 pIemCpu->cbOpcode += cbToTryRead;
987 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
988
989 return VINF_SUCCESS;
990}
991
992
993/**
994 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
995 *
996 * @returns Strict VBox status code.
997 * @param pIemCpu The IEM state.
998 * @param pb Where to return the opcode byte.
999 */
1000DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1001{
1002 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1003 if (rcStrict == VINF_SUCCESS)
1004 {
1005 uint8_t offOpcode = pIemCpu->offOpcode;
1006 *pb = pIemCpu->abOpcode[offOpcode];
1007 pIemCpu->offOpcode = offOpcode + 1;
1008 }
1009 else
1010 *pb = 0;
1011 return rcStrict;
1012}
1013
1014
1015/**
1016 * Fetches the next opcode byte.
1017 *
1018 * @returns Strict VBox status code.
1019 * @param pIemCpu The IEM state.
1020 * @param pu8 Where to return the opcode byte.
1021 */
1022DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1023{
1024 uint8_t const offOpcode = pIemCpu->offOpcode;
1025 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1026 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1027
1028 *pu8 = pIemCpu->abOpcode[offOpcode];
1029 pIemCpu->offOpcode = offOpcode + 1;
1030 return VINF_SUCCESS;
1031}
1032
1033
1034/**
1035 * Fetches the next opcode byte, returns automatically on failure.
1036 *
1037 * @param a_pu8 Where to return the opcode byte.
1038 * @remark Implicitly references pIemCpu.
1039 */
1040#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1041 do \
1042 { \
1043 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1044 if (rcStrict2 != VINF_SUCCESS) \
1045 return rcStrict2; \
1046 } while (0)
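
/* Usage note (illustrative): a decoder typically does
        uint8_t bOpcode; IEM_OPCODE_GET_NEXT_U8(&bOpcode);
   The hidden 'return rcStrict2' on failure is the reason these
   IEM_OPCODE_GET_NEXT_* macros may only be used in functions that themselves
   return VBOXSTRICTRC. */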
1047
1048
1049/**
1050 * Fetches the next signed byte from the opcode stream.
1051 *
1052 * @returns Strict VBox status code.
1053 * @param pIemCpu The IEM state.
1054 * @param pi8 Where to return the signed byte.
1055 */
1056DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1057{
1058 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1059}
1060
1061
1062/**
1063 * Fetches the next signed byte from the opcode stream, returning automatically
1064 * on failure.
1065 *
1066 * @param a_pi8 Where to return the signed byte.
1067 * @remark Implicitly references pIemCpu.
1068 */
1069#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1070 do \
1071 { \
1072 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1073 if (rcStrict2 != VINF_SUCCESS) \
1074 return rcStrict2; \
1075 } while (0)
1076
1077
1078/**
1079 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1080 *
1081 * @returns Strict VBox status code.
1082 * @param pIemCpu The IEM state.
1083 * @param pu16 Where to return the opcode dword.
1084 */
1085DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1086{
1087 uint8_t u8;
1088 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1089 if (rcStrict == VINF_SUCCESS)
1090 *pu16 = (int8_t)u8;
1091 return rcStrict;
1092}
1093
1094
1095/**
1096 * Fetches the next signed byte from the opcode stream, extending it to
1097 * unsigned 16-bit.
1098 *
1099 * @returns Strict VBox status code.
1100 * @param pIemCpu The IEM state.
1101 * @param pu16 Where to return the unsigned word.
1102 */
1103DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1104{
1105 uint8_t const offOpcode = pIemCpu->offOpcode;
1106 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1107 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1108
1109 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1110 pIemCpu->offOpcode = offOpcode + 1;
1111 return VINF_SUCCESS;
1112}
1113
1114
1115/**
1116 * Fetches the next signed byte from the opcode stream and sign-extends it to
1117 * a word, returning automatically on failure.
1118 *
1119 * @param a_pu16 Where to return the word.
1120 * @remark Implicitly references pIemCpu.
1121 */
1122#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1123 do \
1124 { \
1125 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1126 if (rcStrict2 != VINF_SUCCESS) \
1127 return rcStrict2; \
1128 } while (0)
1129
1130
1131/**
1132 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1133 *
1134 * @returns Strict VBox status code.
1135 * @param pIemCpu The IEM state.
1136 * @param pu32 Where to return the opcode dword.
1137 */
1138DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1139{
1140 uint8_t u8;
1141 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1142 if (rcStrict == VINF_SUCCESS)
1143 *pu32 = (int8_t)u8;
1144 return rcStrict;
1145}
1146
1147
1148/**
1149 * Fetches the next signed byte from the opcode stream, extending it to
1150 * unsigned 32-bit.
1151 *
1152 * @returns Strict VBox status code.
1153 * @param pIemCpu The IEM state.
1154 * @param pu32 Where to return the unsigned dword.
1155 */
1156DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1157{
1158 uint8_t const offOpcode = pIemCpu->offOpcode;
1159 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1160 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1161
1162 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1163 pIemCpu->offOpcode = offOpcode + 1;
1164 return VINF_SUCCESS;
1165}
1166
1167
1168/**
1169 * Fetches the next signed byte from the opcode stream and sign-extends it to
1170 * a double word, returning automatically on failure.
1171 *
1172 * @param a_pu32 Where to return the double word.
1173 * @remark Implicitly references pIemCpu.
1174 */
1175#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1176 do \
1177 { \
1178 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1179 if (rcStrict2 != VINF_SUCCESS) \
1180 return rcStrict2; \
1181 } while (0)
1182
1183
1184/**
1185 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1186 *
1187 * @returns Strict VBox status code.
1188 * @param pIemCpu The IEM state.
1189 * @param pu64 Where to return the opcode qword.
1190 */
1191DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1192{
1193 uint8_t u8;
1194 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1195 if (rcStrict == VINF_SUCCESS)
1196 *pu64 = (int8_t)u8;
1197 return rcStrict;
1198}
1199
1200
1201/**
1202 * Fetches the next signed byte from the opcode stream, extending it to
1203 * unsigned 64-bit.
1204 *
1205 * @returns Strict VBox status code.
1206 * @param pIemCpu The IEM state.
1207 * @param pu64 Where to return the unsigned qword.
1208 */
1209DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1210{
1211 uint8_t const offOpcode = pIemCpu->offOpcode;
1212 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1213 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1214
1215 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1216 pIemCpu->offOpcode = offOpcode + 1;
1217 return VINF_SUCCESS;
1218}
1219
1220
1221/**
1222 * Fetches the next signed byte from the opcode stream and sign-extends it to
1223 * a quad word, returning automatically on failure.
1224 *
1225 * @param a_pu64 Where to return the quad word.
1226 * @remark Implicitly references pIemCpu.
1227 */
1228#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1229 do \
1230 { \
1231 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1232 if (rcStrict2 != VINF_SUCCESS) \
1233 return rcStrict2; \
1234 } while (0)
1235
1236
1237/**
1238 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1239 *
1240 * @returns Strict VBox status code.
1241 * @param pIemCpu The IEM state.
1242 * @param pu16 Where to return the opcode word.
1243 */
1244DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1245{
1246 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1247 if (rcStrict == VINF_SUCCESS)
1248 {
1249 uint8_t offOpcode = pIemCpu->offOpcode;
1250 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1251 pIemCpu->offOpcode = offOpcode + 2;
1252 }
1253 else
1254 *pu16 = 0;
1255 return rcStrict;
1256}
1257
1258
1259/**
1260 * Fetches the next opcode word.
1261 *
1262 * @returns Strict VBox status code.
1263 * @param pIemCpu The IEM state.
1264 * @param pu16 Where to return the opcode word.
1265 */
1266DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1267{
1268 uint8_t const offOpcode = pIemCpu->offOpcode;
1269 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1270 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1271
1272 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1273 pIemCpu->offOpcode = offOpcode + 2;
1274 return VINF_SUCCESS;
1275}
1276
1277
1278/**
1279 * Fetches the next opcode word, returns automatically on failure.
1280 *
1281 * @param a_pu16 Where to return the opcode word.
1282 * @remark Implicitly references pIemCpu.
1283 */
1284#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1285 do \
1286 { \
1287 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1288 if (rcStrict2 != VINF_SUCCESS) \
1289 return rcStrict2; \
1290 } while (0)
1291
1292
1293/**
1294 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1295 *
1296 * @returns Strict VBox status code.
1297 * @param pIemCpu The IEM state.
1298 * @param pu32 Where to return the opcode double word.
1299 */
1300DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1301{
1302 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1303 if (rcStrict == VINF_SUCCESS)
1304 {
1305 uint8_t offOpcode = pIemCpu->offOpcode;
1306 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1307 pIemCpu->offOpcode = offOpcode + 2;
1308 }
1309 else
1310 *pu32 = 0;
1311 return rcStrict;
1312}
1313
1314
1315/**
1316 * Fetches the next opcode word, zero extending it to a double word.
1317 *
1318 * @returns Strict VBox status code.
1319 * @param pIemCpu The IEM state.
1320 * @param pu32 Where to return the opcode double word.
1321 */
1322DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1323{
1324 uint8_t const offOpcode = pIemCpu->offOpcode;
1325 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1326 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1327
1328 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1329 pIemCpu->offOpcode = offOpcode + 2;
1330 return VINF_SUCCESS;
1331}
1332
1333
1334/**
1335 * Fetches the next opcode word and zero extends it to a double word, returns
1336 * automatically on failure.
1337 *
1338 * @param a_pu32 Where to return the opcode double word.
1339 * @remark Implicitly references pIemCpu.
1340 */
1341#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1342 do \
1343 { \
1344 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1345 if (rcStrict2 != VINF_SUCCESS) \
1346 return rcStrict2; \
1347 } while (0)
1348
1349
1350/**
1351 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1352 *
1353 * @returns Strict VBox status code.
1354 * @param pIemCpu The IEM state.
1355 * @param pu64 Where to return the opcode quad word.
1356 */
1357DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1358{
1359 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1360 if (rcStrict == VINF_SUCCESS)
1361 {
1362 uint8_t offOpcode = pIemCpu->offOpcode;
1363 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1364 pIemCpu->offOpcode = offOpcode + 2;
1365 }
1366 else
1367 *pu64 = 0;
1368 return rcStrict;
1369}
1370
1371
1372/**
1373 * Fetches the next opcode word, zero extending it to a quad word.
1374 *
1375 * @returns Strict VBox status code.
1376 * @param pIemCpu The IEM state.
1377 * @param pu64 Where to return the opcode quad word.
1378 */
1379DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1380{
1381 uint8_t const offOpcode = pIemCpu->offOpcode;
1382 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1383 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1384
1385 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1386 pIemCpu->offOpcode = offOpcode + 2;
1387 return VINF_SUCCESS;
1388}
1389
1390
1391/**
1392 * Fetches the next opcode word and zero extends it to a quad word, returns
1393 * automatically on failure.
1394 *
1395 * @param a_pu64 Where to return the opcode quad word.
1396 * @remark Implicitly references pIemCpu.
1397 */
1398#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1399 do \
1400 { \
1401 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1402 if (rcStrict2 != VINF_SUCCESS) \
1403 return rcStrict2; \
1404 } while (0)
1405
1406
1407/**
1408 * Fetches the next signed word from the opcode stream.
1409 *
1410 * @returns Strict VBox status code.
1411 * @param pIemCpu The IEM state.
1412 * @param pi16 Where to return the signed word.
1413 */
1414DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1415{
1416 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1417}
1418
1419
1420/**
1421 * Fetches the next signed word from the opcode stream, returning automatically
1422 * on failure.
1423 *
1424 * @param a_pi16 Where to return the signed word.
1425 * @remark Implicitly references pIemCpu.
1426 */
1427#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1428 do \
1429 { \
1430 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1431 if (rcStrict2 != VINF_SUCCESS) \
1432 return rcStrict2; \
1433 } while (0)
1434
1435
1436/**
1437 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1438 *
1439 * @returns Strict VBox status code.
1440 * @param pIemCpu The IEM state.
1441 * @param pu32 Where to return the opcode dword.
1442 */
1443DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1444{
1445 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1446 if (rcStrict == VINF_SUCCESS)
1447 {
1448 uint8_t offOpcode = pIemCpu->offOpcode;
1449 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1450 pIemCpu->abOpcode[offOpcode + 1],
1451 pIemCpu->abOpcode[offOpcode + 2],
1452 pIemCpu->abOpcode[offOpcode + 3]);
1453 pIemCpu->offOpcode = offOpcode + 4;
1454 }
1455 else
1456 *pu32 = 0;
1457 return rcStrict;
1458}
1459
1460
1461/**
1462 * Fetches the next opcode dword.
1463 *
1464 * @returns Strict VBox status code.
1465 * @param pIemCpu The IEM state.
1466 * @param pu32 Where to return the opcode double word.
1467 */
1468DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1469{
1470 uint8_t const offOpcode = pIemCpu->offOpcode;
1471 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1472 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1473
1474 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1475 pIemCpu->abOpcode[offOpcode + 1],
1476 pIemCpu->abOpcode[offOpcode + 2],
1477 pIemCpu->abOpcode[offOpcode + 3]);
1478 pIemCpu->offOpcode = offOpcode + 4;
1479 return VINF_SUCCESS;
1480}
1481
1482
1483/**
1484 * Fetches the next opcode dword, returns automatically on failure.
1485 *
1486 * @param a_pu32 Where to return the opcode dword.
1487 * @remark Implicitly references pIemCpu.
1488 */
1489#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1490 do \
1491 { \
1492 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1493 if (rcStrict2 != VINF_SUCCESS) \
1494 return rcStrict2; \
1495 } while (0)
1496
1497
1498/**
1499 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1500 *
1501 * @returns Strict VBox status code.
1502 * @param pIemCpu The IEM state.
1503 * @param pu64 Where to return the opcode quad word.
1504 */
1505DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1506{
1507 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1508 if (rcStrict == VINF_SUCCESS)
1509 {
1510 uint8_t offOpcode = pIemCpu->offOpcode;
1511 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1512 pIemCpu->abOpcode[offOpcode + 1],
1513 pIemCpu->abOpcode[offOpcode + 2],
1514 pIemCpu->abOpcode[offOpcode + 3]);
1515 pIemCpu->offOpcode = offOpcode + 4;
1516 }
1517 else
1518 *pu64 = 0;
1519 return rcStrict;
1520}
1521
1522
1523/**
1524 * Fetches the next opcode dword, zero extending it to a quad word.
1525 *
1526 * @returns Strict VBox status code.
1527 * @param pIemCpu The IEM state.
1528 * @param pu64 Where to return the opcode quad word.
1529 */
1530DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1531{
1532 uint8_t const offOpcode = pIemCpu->offOpcode;
1533 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1534 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1535
1536 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1537 pIemCpu->abOpcode[offOpcode + 1],
1538 pIemCpu->abOpcode[offOpcode + 2],
1539 pIemCpu->abOpcode[offOpcode + 3]);
1540 pIemCpu->offOpcode = offOpcode + 4;
1541 return VINF_SUCCESS;
1542}
1543
1544
1545/**
1546 * Fetches the next opcode dword and zero extends it to a quad word, returns
1547 * automatically on failure.
1548 *
1549 * @param a_pu64 Where to return the opcode quad word.
1550 * @remark Implicitly references pIemCpu.
1551 */
1552#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1553 do \
1554 { \
1555 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1556 if (rcStrict2 != VINF_SUCCESS) \
1557 return rcStrict2; \
1558 } while (0)
1559
1560
1561/**
1562 * Fetches the next signed double word from the opcode stream.
1563 *
1564 * @returns Strict VBox status code.
1565 * @param pIemCpu The IEM state.
1566 * @param pi32 Where to return the signed double word.
1567 */
1568DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1569{
1570 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1571}
1572
1573/**
1574 * Fetches the next signed double word from the opcode stream, returning
1575 * automatically on failure.
1576 *
1577 * @param a_pi32 Where to return the signed double word.
1578 * @remark Implicitly references pIemCpu.
1579 */
1580#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1581 do \
1582 { \
1583 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1584 if (rcStrict2 != VINF_SUCCESS) \
1585 return rcStrict2; \
1586 } while (0)
1587
1588
1589/**
1590 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1591 *
1592 * @returns Strict VBox status code.
1593 * @param pIemCpu The IEM state.
1594 * @param pu64 Where to return the opcode qword.
1595 */
1596DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1597{
1598 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1599 if (rcStrict == VINF_SUCCESS)
1600 {
1601 uint8_t offOpcode = pIemCpu->offOpcode;
1602 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1603 pIemCpu->abOpcode[offOpcode + 1],
1604 pIemCpu->abOpcode[offOpcode + 2],
1605 pIemCpu->abOpcode[offOpcode + 3]);
1606 pIemCpu->offOpcode = offOpcode + 4;
1607 }
1608 else
1609 *pu64 = 0;
1610 return rcStrict;
1611}
1612
1613
1614/**
1615 * Fetches the next opcode dword, sign extending it into a quad word.
1616 *
1617 * @returns Strict VBox status code.
1618 * @param pIemCpu The IEM state.
1619 * @param pu64 Where to return the opcode quad word.
1620 */
1621DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1622{
1623 uint8_t const offOpcode = pIemCpu->offOpcode;
1624 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1625 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1626
1627 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1628 pIemCpu->abOpcode[offOpcode + 1],
1629 pIemCpu->abOpcode[offOpcode + 2],
1630 pIemCpu->abOpcode[offOpcode + 3]);
1631 *pu64 = i32;
1632 pIemCpu->offOpcode = offOpcode + 4;
1633 return VINF_SUCCESS;
1634}
1635
1636
1637/**
1638 * Fetches the next opcode double word and sign extends it to a quad word,
1639 * returns automatically on failure.
1640 *
1641 * @param a_pu64 Where to return the opcode quad word.
1642 * @remark Implicitly references pIemCpu.
1643 */
1644#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1645 do \
1646 { \
1647 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1648 if (rcStrict2 != VINF_SUCCESS) \
1649 return rcStrict2; \
1650 } while (0)
1651
1652
1653/**
1654 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1655 *
1656 * @returns Strict VBox status code.
1657 * @param pIemCpu The IEM state.
1658 * @param pu64 Where to return the opcode qword.
1659 */
1660DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1661{
1662 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1663 if (rcStrict == VINF_SUCCESS)
1664 {
1665 uint8_t offOpcode = pIemCpu->offOpcode;
1666 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1667 pIemCpu->abOpcode[offOpcode + 1],
1668 pIemCpu->abOpcode[offOpcode + 2],
1669 pIemCpu->abOpcode[offOpcode + 3],
1670 pIemCpu->abOpcode[offOpcode + 4],
1671 pIemCpu->abOpcode[offOpcode + 5],
1672 pIemCpu->abOpcode[offOpcode + 6],
1673 pIemCpu->abOpcode[offOpcode + 7]);
1674 pIemCpu->offOpcode = offOpcode + 8;
1675 }
1676 else
1677 *pu64 = 0;
1678 return rcStrict;
1679}
1680
1681
1682/**
1683 * Fetches the next opcode qword.
1684 *
1685 * @returns Strict VBox status code.
1686 * @param pIemCpu The IEM state.
1687 * @param pu64 Where to return the opcode qword.
1688 */
1689DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1690{
1691 uint8_t const offOpcode = pIemCpu->offOpcode;
1692 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1693 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1694
1695 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1696 pIemCpu->abOpcode[offOpcode + 1],
1697 pIemCpu->abOpcode[offOpcode + 2],
1698 pIemCpu->abOpcode[offOpcode + 3],
1699 pIemCpu->abOpcode[offOpcode + 4],
1700 pIemCpu->abOpcode[offOpcode + 5],
1701 pIemCpu->abOpcode[offOpcode + 6],
1702 pIemCpu->abOpcode[offOpcode + 7]);
1703 pIemCpu->offOpcode = offOpcode + 8;
1704 return VINF_SUCCESS;
1705}
1706
1707
1708/**
1709 * Fetches the next opcode quad word, returns automatically on failure.
1710 *
1711 * @param a_pu64 Where to return the opcode quad word.
1712 * @remark Implicitly references pIemCpu.
1713 */
1714#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1715 do \
1716 { \
1717 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1718 if (rcStrict2 != VINF_SUCCESS) \
1719 return rcStrict2; \
1720 } while (0)
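
/* Illustrative usage sketch (not part of the original source): the
 * IEM_OPCODE_GET_NEXT_* macros are meant to be used inside decoder functions
 * where pIemCpu is in scope, so a failed opcode fetch simply propagates the
 * strict status code to the caller:
 *
 *      uint64_t u64Imm;                   // hypothetical local
 *      IEM_OPCODE_GET_NEXT_U64(&u64Imm);  // returns rcStrict2 on failure
 *      // ... continue decoding using u64Imm ...
 *
 * Each macro wraps the corresponding iemOpcodeGetNextXxx worker, which in
 * turn falls back to the iemOpcodeGetNextXxxSlow path when the opcode buffer
 * does not yet contain enough bytes. */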
1721
1722
1723/** @name Misc Worker Functions.
1724 * @{
1725 */
1726
1727
1728/**
1729 * Validates a new SS segment.
1730 *
1731 * @returns VBox strict status code.
1732 * @param pIemCpu The IEM per CPU instance data.
1733 * @param pCtx The CPU context.
1734 * @param   NewSS               The new SS selector.
1735 * @param uCpl The CPL to load the stack for.
1736 * @param pDesc Where to return the descriptor.
1737 */
1738static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1739{
1740 NOREF(pCtx);
1741
1742 /* Null selectors are not allowed (we're not called for dispatching
1743 interrupts with SS=0 in long mode). */
1744 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1745 {
1746        Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1747 return iemRaiseGeneralProtectionFault0(pIemCpu);
1748 }
1749
1750 /*
1751 * Read the descriptor.
1752 */
1753 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1754 if (rcStrict != VINF_SUCCESS)
1755 return rcStrict;
1756
1757 /*
1758 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1759 */
1760 if (!pDesc->Legacy.Gen.u1DescType)
1761 {
1762        Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1763 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1764 }
1765
1766 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1767 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1768 {
1769 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1770 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1771 }
1772 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1773 if ((NewSS & X86_SEL_RPL) != uCpl)
1774 {
1775        Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #GP\n", NewSS, uCpl));
1776 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1777 }
1778 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1779 {
1780        Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1781 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1782 }
1783
1784 /* Is it there? */
1785 /** @todo testcase: Is this checked before the canonical / limit check below? */
1786 if (!pDesc->Legacy.Gen.u1Present)
1787 {
1788 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1789 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1790 }
1791
1792 return VINF_SUCCESS;
1793}
1794
1795
1796/**
1797 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1798 * not.
1799 *
1800 * @param a_pIemCpu The IEM per CPU data.
1801 * @param a_pCtx The CPU context.
1802 */
1803#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1804# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1805 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
1806 ? (a_pCtx)->eflags.u \
1807 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
1808#else
1809# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1810 ( (a_pCtx)->eflags.u )
1811#endif
1812
1813/**
1814 * Updates the EFLAGS in the correct manner wrt. PATM.
1815 *
1816 * @param a_pIemCpu The IEM per CPU data.
1817 * @param a_pCtx The CPU context.
1818 */
1819#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1820# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1821 do { \
1822 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
1823 (a_pCtx)->eflags.u = (a_fEfl); \
1824 else \
1825 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
1826 } while (0)
1827#else
1828# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1829 do { \
1830 (a_pCtx)->eflags.u = (a_fEfl); \
1831 } while (0)
1832#endif
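
/* Illustrative usage sketch (not part of the original source): the two macros
 * above are intended for read-modify-write sequences on EFLAGS so that
 * PATM-managed bits are handled consistently in raw mode, e.g.:
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 *
 * This is the pattern used by the exception dispatchers further down. */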
1833
1834
1835/** @} */
1836
1837/** @name Raising Exceptions.
1838 *
1839 * @{
1840 */
1841
1842/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1843 * @{ */
1844/** CPU exception. */
1845#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1846/** External interrupt (from PIC, APIC, whatever). */
1847#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1848/** Software interrupt (int, into or bound). */
1849#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1850/** Takes an error code. */
1851#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1852/** Takes a CR2. */
1853#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1854/** Generated by the breakpoint instruction. */
1855#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1856/** @} */
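
/* Illustrative note (not part of the original source): these flags are
 * combined by the iemRaiseXxx helpers below; a page fault, for instance, is
 * raised as a CPU exception carrying both an error code and a CR2 value:
 *
 *      return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF,
 *                               IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
 *                               uErr, GCPtrWhere);
 */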
1857
1858/**
1859 * Loads the specified stack far pointer from the TSS.
1860 *
1861 * @returns VBox strict status code.
1862 * @param pIemCpu The IEM per CPU instance data.
1863 * @param pCtx The CPU context.
1864 * @param uCpl The CPL to load the stack for.
1865 * @param pSelSS Where to return the new stack segment.
1866 * @param puEsp Where to return the new stack pointer.
1867 */
1868static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1869 PRTSEL pSelSS, uint32_t *puEsp)
1870{
1871 VBOXSTRICTRC rcStrict;
1872 Assert(uCpl < 4);
1873 *puEsp = 0; /* make gcc happy */
1874 *pSelSS = 0; /* make gcc happy */
1875
1876 switch (pCtx->tr.Attr.n.u4Type)
1877 {
1878 /*
1879 * 16-bit TSS (X86TSS16).
1880 */
1881 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1882 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1883 {
1884 uint32_t off = uCpl * 4 + 2;
1885 if (off + 4 > pCtx->tr.u32Limit)
1886 {
1887 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
1888 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1889 }
1890
1891 uint32_t u32Tmp = 0; /* gcc maybe... */
1892 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1893 if (rcStrict == VINF_SUCCESS)
1894 {
1895 *puEsp = RT_LOWORD(u32Tmp);
1896 *pSelSS = RT_HIWORD(u32Tmp);
1897 return VINF_SUCCESS;
1898 }
1899 break;
1900 }
1901
1902 /*
1903 * 32-bit TSS (X86TSS32).
1904 */
1905 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1906 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1907 {
1908 uint32_t off = uCpl * 8 + 4;
1909 if (off + 7 > pCtx->tr.u32Limit)
1910 {
1911                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
1912 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1913 }
1914
1915 uint64_t u64Tmp;
1916 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1917 if (rcStrict == VINF_SUCCESS)
1918 {
1919 *puEsp = u64Tmp & UINT32_MAX;
1920 *pSelSS = (RTSEL)(u64Tmp >> 32);
1921 return VINF_SUCCESS;
1922 }
1923 break;
1924 }
1925
1926 default:
1927 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1928 }
1929 return rcStrict;
1930}
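
/* Worked example (not part of the original source) of the TSS offsets used
 * above: a 16-bit TSS stores 4-byte SP:SS pairs starting at offset 2, so the
 * CPL-n stack is read at 2 + 4*n (CPL0 -> 2, CPL1 -> 6, CPL2 -> 10).  A
 * 32-bit TSS stores a 4-byte ESP followed by a 4-byte SS starting at offset
 * 4, so the CPL-n stack is read as one qword at 4 + 8*n (CPL0 -> 4,
 * CPL1 -> 12, CPL2 -> 20). */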
1931
1932
1933/**
1934 * Adjust the CPU state according to the exception being raised.
1935 *
1936 * @param pCtx The CPU context.
1937 * @param u8Vector The exception that has been raised.
1938 */
1939DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1940{
1941 switch (u8Vector)
1942 {
1943 case X86_XCPT_DB:
1944 pCtx->dr[7] &= ~X86_DR7_GD;
1945 break;
1946 /** @todo Read the AMD and Intel exception reference... */
1947 }
1948}
1949
1950
1951/**
1952 * Implements exceptions and interrupts for real mode.
1953 *
1954 * @returns VBox strict status code.
1955 * @param pIemCpu The IEM per CPU instance data.
1956 * @param pCtx The CPU context.
1957 * @param cbInstr The number of bytes to offset rIP by in the return
1958 * address.
1959 * @param u8Vector The interrupt / exception vector number.
1960 * @param fFlags The flags.
1961 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1962 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1963 */
1964static VBOXSTRICTRC
1965iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1966 PCPUMCTX pCtx,
1967 uint8_t cbInstr,
1968 uint8_t u8Vector,
1969 uint32_t fFlags,
1970 uint16_t uErr,
1971 uint64_t uCr2)
1972{
1973 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1974 NOREF(uErr); NOREF(uCr2);
1975
1976 /*
1977 * Read the IDT entry.
1978 */
1979 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1980 {
1981 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1982 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1983 }
1984 RTFAR16 Idte;
1985 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1986 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1987 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1988 return rcStrict;
1989
1990 /*
1991 * Push the stack frame.
1992 */
1993 uint16_t *pu16Frame;
1994 uint64_t uNewRsp;
1995 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1996 if (rcStrict != VINF_SUCCESS)
1997 return rcStrict;
1998
1999 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2000 pu16Frame[2] = (uint16_t)fEfl;
2001 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2002 pu16Frame[0] = pCtx->ip + cbInstr;
2003 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2004 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2005 return rcStrict;
2006
2007 /*
2008 * Load the vector address into cs:ip and make exception specific state
2009 * adjustments.
2010 */
2011 pCtx->cs.Sel = Idte.sel;
2012 pCtx->cs.ValidSel = Idte.sel;
2013 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2014 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2015 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2016 pCtx->rip = Idte.off;
2017 fEfl &= ~X86_EFL_IF;
2018 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2019
2020 /** @todo do we actually do this in real mode? */
2021 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2022 iemRaiseXcptAdjustState(pCtx, u8Vector);
2023
2024 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2025}
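
/* Worked example (not part of the original source): in real mode the IVT
 * entry for vector n is a 4-byte offset:segment pair at idtr.pIdt + 4*n, so
 * e.g. INT 10h is fetched from pIdt + 0x40.  The frame pushed above is six
 * bytes: the return IP at the new SP, CS at SP+2 and FLAGS at SP+4, matching
 * pu16Frame[0], pu16Frame[1] and pu16Frame[2]. */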
2026
2027
2028/**
2029 * Implements exceptions and interrupts for protected mode.
2030 *
2031 * @returns VBox strict status code.
2032 * @param pIemCpu The IEM per CPU instance data.
2033 * @param pCtx The CPU context.
2034 * @param cbInstr The number of bytes to offset rIP by in the return
2035 * address.
2036 * @param u8Vector The interrupt / exception vector number.
2037 * @param fFlags The flags.
2038 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2039 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2040 */
2041static VBOXSTRICTRC
2042iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
2043 PCPUMCTX pCtx,
2044 uint8_t cbInstr,
2045 uint8_t u8Vector,
2046 uint32_t fFlags,
2047 uint16_t uErr,
2048 uint64_t uCr2)
2049{
2050 NOREF(cbInstr);
2051
2052 /*
2053 * Read the IDT entry.
2054 */
2055 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2056 {
2057 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2058 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2059 }
2060 X86DESC Idte;
2061 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
2062 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
2063 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2064 return rcStrict;
2065 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2066 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2067 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2068
2069 /*
2070 * Check the descriptor type, DPL and such.
2071 * ASSUMES this is done in the same order as described for call-gate calls.
2072 */
2073 if (Idte.Gate.u1DescType)
2074 {
2075 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2076 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2077 }
2078 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2079 switch (Idte.Gate.u4Type)
2080 {
2081 case X86_SEL_TYPE_SYS_UNDEFINED:
2082 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2083 case X86_SEL_TYPE_SYS_LDT:
2084 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2085 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2086 case X86_SEL_TYPE_SYS_UNDEFINED2:
2087 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2088 case X86_SEL_TYPE_SYS_UNDEFINED3:
2089 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2090 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2091 case X86_SEL_TYPE_SYS_UNDEFINED4:
2092 {
2093 /** @todo check what actually happens when the type is wrong...
2094 * esp. call gates. */
2095 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2096 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2097 }
2098
2099 case X86_SEL_TYPE_SYS_286_INT_GATE:
2100 case X86_SEL_TYPE_SYS_386_INT_GATE:
2101 fEflToClear |= X86_EFL_IF;
2102 break;
2103
2104 case X86_SEL_TYPE_SYS_TASK_GATE:
2105 /** @todo task gates. */
2106 AssertFailedReturn(VERR_NOT_SUPPORTED);
2107
2108 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2109 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2110 break;
2111
2112 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2113 }
2114
2115 /* Check DPL against CPL if applicable. */
2116 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2117 {
2118 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2119 {
2120 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2121 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2122 }
2123 }
2124
2125 /* Is it there? */
2126 if (!Idte.Gate.u1Present)
2127 {
2128 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2129 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2130 }
2131
2132 /* A null CS is bad. */
2133 RTSEL NewCS = Idte.Gate.u16Sel;
2134 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2135 {
2136 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2137 return iemRaiseGeneralProtectionFault0(pIemCpu);
2138 }
2139
2140 /* Fetch the descriptor for the new CS. */
2141 IEMSELDESC DescCS;
2142 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2143 if (rcStrict != VINF_SUCCESS)
2144 {
2145 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2146 return rcStrict;
2147 }
2148
2149 /* Must be a code segment. */
2150 if (!DescCS.Legacy.Gen.u1DescType)
2151 {
2152 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2153 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2154 }
2155 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2156 {
2157 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2158 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2159 }
2160
2161 /* Don't allow lowering the privilege level. */
2162 /** @todo Does the lowering of privileges apply to software interrupts
2163 * only? This has bearings on the more-privileged or
2164 * same-privilege stack behavior further down. A testcase would
2165 * be nice. */
2166 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2167 {
2168 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2169 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2170 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2171 }
2172 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
2173
2174 /* Check the new EIP against the new CS limit. */
2175 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
2176 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
2177 ? Idte.Gate.u16OffsetLow
2178 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
2179 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2180 if (uNewEip > cbLimitCS)
2181 {
2182        Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - new EIP (%#x) is above the limit (%#x) -> #GP\n",
2183             u8Vector, NewCS, uNewEip, cbLimitCS));
2184 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2185 }
2186
2187 /* Make sure the selector is present. */
2188 if (!DescCS.Legacy.Gen.u1Present)
2189 {
2190 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2191 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2192 }
2193
2194 /*
2195 * If the privilege level changes, we need to get a new stack from the TSS.
2196 * This in turns means validating the new SS and ESP...
2197 */
2198 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2199 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2200 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2201 if (uNewCpl != pIemCpu->uCpl)
2202 {
2203 RTSEL NewSS;
2204 uint32_t uNewEsp;
2205 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
2206 if (rcStrict != VINF_SUCCESS)
2207 return rcStrict;
2208
2209 IEMSELDESC DescSS;
2210 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
2211 if (rcStrict != VINF_SUCCESS)
2212 return rcStrict;
2213
2214 /* Check that there is sufficient space for the stack frame. */
2215 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2216 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
2217 {
2218 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
2219 }
2220
2221 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
2222 if ( uNewEsp - 1 > cbLimitSS
2223 || uNewEsp < cbStackFrame)
2224 {
2225 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
2226 u8Vector, NewSS, uNewEsp, cbStackFrame));
2227 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
2228 }
2229
2230 /*
2231 * Start making changes.
2232 */
2233
2234 /* Create the stack frame. */
2235 RTPTRUNION uStackFrame;
2236 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2237 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2238 if (rcStrict != VINF_SUCCESS)
2239 return rcStrict;
2240 void * const pvStackFrame = uStackFrame.pv;
2241
2242 if (fFlags & IEM_XCPT_FLAGS_ERR)
2243 *uStackFrame.pu32++ = uErr;
2244 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2245 ? pCtx->eip + cbInstr : pCtx->eip;
2246 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2247 uStackFrame.pu32[2] = fEfl;
2248 uStackFrame.pu32[3] = pCtx->esp;
2249 uStackFrame.pu32[4] = pCtx->ss.Sel;
2250 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2251 if (rcStrict != VINF_SUCCESS)
2252 return rcStrict;
2253
2254 /* Mark the selectors 'accessed' (hope this is the correct time). */
2255        /** @todo testcase: exactly _when_ are the accessed bits set - before or
2256 * after pushing the stack frame? (Write protect the gdt + stack to
2257 * find out.) */
2258 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2259 {
2260 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2261 if (rcStrict != VINF_SUCCESS)
2262 return rcStrict;
2263 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2264 }
2265
2266 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2267 {
2268 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
2269 if (rcStrict != VINF_SUCCESS)
2270 return rcStrict;
2271 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2272 }
2273
2274 /*
2275         * Start committing the register changes (joins with the DPL=CPL branch).
2276 */
2277 pCtx->ss.Sel = NewSS;
2278 pCtx->ss.ValidSel = NewSS;
2279 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2280 pCtx->ss.u32Limit = cbLimitSS;
2281 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2282 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2283 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
2284 pIemCpu->uCpl = uNewCpl;
2285 }
2286 /*
2287 * Same privilege, no stack change and smaller stack frame.
2288 */
2289 else
2290 {
2291 uint64_t uNewRsp;
2292 RTPTRUNION uStackFrame;
2293 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
2294 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2295 if (rcStrict != VINF_SUCCESS)
2296 return rcStrict;
2297 void * const pvStackFrame = uStackFrame.pv;
2298
2299 if (fFlags & IEM_XCPT_FLAGS_ERR)
2300 *uStackFrame.pu32++ = uErr;
2301 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2302 ? pCtx->eip + cbInstr : pCtx->eip;
2303 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2304 uStackFrame.pu32[2] = fEfl;
2305 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2306 if (rcStrict != VINF_SUCCESS)
2307 return rcStrict;
2308
2309 /* Mark the CS selector as 'accessed'. */
2310 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2311 {
2312 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2313 if (rcStrict != VINF_SUCCESS)
2314 return rcStrict;
2315 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2316 }
2317
2318 /*
2319 * Start committing the register changes (joins with the other branch).
2320 */
2321 pCtx->rsp = uNewRsp;
2322 }
2323
2324 /* ... register committing continues. */
2325 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2326 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2327 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2328 pCtx->cs.u32Limit = cbLimitCS;
2329 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2330 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2331
2332 pCtx->rip = uNewEip;
2333 fEfl &= ~fEflToClear;
2334 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2335
2336 if (fFlags & IEM_XCPT_FLAGS_CR2)
2337 pCtx->cr2 = uCr2;
2338
2339 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2340 iemRaiseXcptAdjustState(pCtx, u8Vector);
2341
2342 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2343}
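
/* Worked example (not part of the original source) of the frames built above:
 * with a privilege change the handler stack receives, from low to high
 * addresses, [uErr] EIP CS EFLAGS ESP SS (20 bytes, 24 with an error code);
 * without a privilege change only [uErr] EIP CS EFLAGS is pushed (12 bytes,
 * 16 with an error code), matching the cbStackFrame values in both branches. */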
2344
2345
2346/**
2347 * Implements exceptions and interrupts for V8086 mode.
2348 *
2349 * @returns VBox strict status code.
2350 * @param pIemCpu The IEM per CPU instance data.
2351 * @param pCtx The CPU context.
2352 * @param cbInstr The number of bytes to offset rIP by in the return
2353 * address.
2354 * @param u8Vector The interrupt / exception vector number.
2355 * @param fFlags The flags.
2356 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2357 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2358 */
2359static VBOXSTRICTRC
2360iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2361 PCPUMCTX pCtx,
2362 uint8_t cbInstr,
2363 uint8_t u8Vector,
2364 uint32_t fFlags,
2365 uint16_t uErr,
2366 uint64_t uCr2)
2367{
2368 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2369 /** @todo implement me. */
2370 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n"));
2371}
2372
2373
2374/**
2375 * Implements exceptions and interrupts for long mode.
2376 *
2377 * @returns VBox strict status code.
2378 * @param pIemCpu The IEM per CPU instance data.
2379 * @param pCtx The CPU context.
2380 * @param cbInstr The number of bytes to offset rIP by in the return
2381 * address.
2382 * @param u8Vector The interrupt / exception vector number.
2383 * @param fFlags The flags.
2384 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2385 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2386 */
2387static VBOXSTRICTRC
2388iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2389 PCPUMCTX pCtx,
2390 uint8_t cbInstr,
2391 uint8_t u8Vector,
2392 uint32_t fFlags,
2393 uint16_t uErr,
2394 uint64_t uCr2)
2395{
2396 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2397 /** @todo implement me. */
2398 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("long mode exception / interrupt dispatching\n"));
2399}
2400
2401
2402/**
2403 * Implements exceptions and interrupts.
2404 *
2405 * All exceptions and interrupts go through this function!
2406 *
2407 * @returns VBox strict status code.
2408 * @param pIemCpu The IEM per CPU instance data.
2409 * @param cbInstr The number of bytes to offset rIP by in the return
2410 * address.
2411 * @param u8Vector The interrupt / exception vector number.
2412 * @param fFlags The flags.
2413 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2414 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2415 */
2416DECL_NO_INLINE(static, VBOXSTRICTRC)
2417iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2418 uint8_t cbInstr,
2419 uint8_t u8Vector,
2420 uint32_t fFlags,
2421 uint16_t uErr,
2422 uint64_t uCr2)
2423{
2424 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2425
2426 /*
2427 * Do recursion accounting.
2428 */
2429 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2430 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2431 if (pIemCpu->cXcptRecursions == 0)
2432 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2433 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2434 else
2435 {
2436 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2437 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2438
2439        /** @todo double and triple faults. */
2440 if (pIemCpu->cXcptRecursions >= 3)
2441 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2442
2443 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2444 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2445 {
2446 ....
2447 } */
2448 }
2449 pIemCpu->cXcptRecursions++;
2450 pIemCpu->uCurXcpt = u8Vector;
2451 pIemCpu->fCurXcpt = fFlags;
2452
2453 /*
2454 * Extensive logging.
2455 */
2456#if defined(LOG_ENABLED) && defined(IN_RING3)
2457 if (LogIs3Enabled())
2458 {
2459 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2460 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2461 char szRegs[4096];
2462 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2463 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2464 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2465 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2466 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2467 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2468 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2469 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2470 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2471 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2472 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2473 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2474 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2475 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2476 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2477 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2478 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2479 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2480 " efer=%016VR{efer}\n"
2481 " pat=%016VR{pat}\n"
2482 " sf_mask=%016VR{sf_mask}\n"
2483 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2484 " lstar=%016VR{lstar}\n"
2485 " star=%016VR{star} cstar=%016VR{cstar}\n"
2486 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2487 );
2488
2489 char szInstr[256];
2490 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
2491 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2492 szInstr, sizeof(szInstr), NULL);
2493 Log3(("%s%s\n", szRegs, szInstr));
2494 }
2495#endif /* LOG_ENABLED */
2496
2497 /*
2498 * Call the mode specific worker function.
2499 */
2500 VBOXSTRICTRC rcStrict;
2501 if (!(pCtx->cr0 & X86_CR0_PE))
2502 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2503 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2504 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2505 else if (!pCtx->eflags.Bits.u1VM)
2506 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2507 else
2508 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2509
2510 /*
2511 * Unwind.
2512 */
2513 pIemCpu->cXcptRecursions--;
2514 pIemCpu->uCurXcpt = uPrevXcpt;
2515 pIemCpu->fCurXcpt = fPrevXcpt;
2516 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
2517 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
2518 return rcStrict;
2519}
2520
2521
2522/** \#DE - 00. */
2523DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2524{
2525 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2526}
2527
2528
2529/** \#DB - 01. */
2530DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2531{
2532 /** @todo set/clear RF. */
2533 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2534}
2535
2536
2537/** \#UD - 06. */
2538DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2539{
2540 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2541}
2542
2543
2544/** \#NM - 07. */
2545DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2546{
2547 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2548}
2549
2550
2551#ifdef SOME_UNUSED_FUNCTION
2552/** \#TS(err) - 0a. */
2553DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2554{
2555 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2556}
2557#endif
2558
2559
2560/** \#TS(tr) - 0a. */
2561DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2562{
2563 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2564 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
2565}
2566
2567
2568/** \#NP(err) - 0b. */
2569DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2570{
2571 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2572}
2573
2574
2575/** \#NP(seg) - 0b. */
2576DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2577{
2578 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2579 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2580}
2581
2582
2583/** \#NP(sel) - 0b. */
2584DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2585{
2586 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2587 uSel & ~X86_SEL_RPL, 0);
2588}
2589
2590
2591/** \#SS(seg) - 0c. */
2592DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2593{
2594 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2595 uSel & ~X86_SEL_RPL, 0);
2596}
2597
2598
2599/** \#GP(n) - 0d. */
2600DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2601{
2602 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2603}
2604
2605
2606/** \#GP(0) - 0d. */
2607DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2608{
2609 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2610}
2611
2612
2613/** \#GP(sel) - 0d. */
2614DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2615{
2616 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2617 Sel & ~X86_SEL_RPL, 0);
2618}
2619
2620
2621/** \#GP(0) - 0d. */
2622DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2623{
2624 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2625}
2626
2627
2628/** \#GP(sel) - 0d. */
2629DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2630{
2631 NOREF(iSegReg); NOREF(fAccess);
2632 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2633 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2634}
2635
2636
2637/** \#GP(sel) - 0d. */
2638DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2639{
2640 NOREF(Sel);
2641 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2642}
2643
2644
2645/** \#GP(sel) - 0d. */
2646DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2647{
2648 NOREF(iSegReg); NOREF(fAccess);
2649 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2650}
2651
2652
2653/** \#PF(n) - 0e. */
2654DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2655{
2656 uint16_t uErr;
2657 switch (rc)
2658 {
2659 case VERR_PAGE_NOT_PRESENT:
2660 case VERR_PAGE_TABLE_NOT_PRESENT:
2661 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2662 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2663 uErr = 0;
2664 break;
2665
2666 default:
2667 AssertMsgFailed(("%Rrc\n", rc));
2668 case VERR_ACCESS_DENIED:
2669 uErr = X86_TRAP_PF_P;
2670 break;
2671
2672 /** @todo reserved */
2673 }
2674
2675 if (pIemCpu->uCpl == 3)
2676 uErr |= X86_TRAP_PF_US;
2677
2678 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2679 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2680 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2681 uErr |= X86_TRAP_PF_ID;
2682
2683 /* Note! RW access callers reporting a WRITE protection fault, will clear
2684 the READ flag before calling. So, read-modify-write accesses (RW)
2685 can safely be reported as READ faults. */
2686 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2687 uErr |= X86_TRAP_PF_RW;
2688
2689 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2690 uErr, GCPtrWhere);
2691}
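
/* Worked example (not part of the original source) of the error code composed
 * above: a ring-3 write to a present, read-only page yields
 * X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW, while a ring-0 instruction
 * fetch from a not-present page with PAE and NXE enabled yields just
 * X86_TRAP_PF_ID. */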
2692
2693
2694/** \#MF(0) - 10. */
2695DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2696{
2697 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2698}
2699
2700
2701/** \#AC(0) - 11. */
2702DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
2703{
2704 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2705}
2706
2707
2708/**
2709 * Macro for calling iemCImplRaiseDivideError().
2710 *
2711 * This enables us to add/remove arguments and force different levels of
2712 * inlining as we wish.
2713 *
2714 * @return Strict VBox status code.
2715 */
2716#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2717IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2718{
2719 NOREF(cbInstr);
2720 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2721}
2722
2723
2724/**
2725 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2726 *
2727 * This enables us to add/remove arguments and force different levels of
2728 * inlining as we wish.
2729 *
2730 * @return Strict VBox status code.
2731 */
2732#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2733IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2734{
2735 NOREF(cbInstr);
2736 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2737}
2738
2739
2740/**
2741 * Macro for calling iemCImplRaiseInvalidOpcode().
2742 *
2743 * This enables us to add/remove arguments and force different levels of
2744 * inlining as we wish.
2745 *
2746 * @return Strict VBox status code.
2747 */
2748#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2749IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2750{
2751 NOREF(cbInstr);
2752 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2753}
2754
2755
2756/** @} */
2757
2758
2759/*
2760 *
2761 * Helper routines.
2762 * Helper routines.
2763 * Helper routines.
2764 *
2765 */
2766
2767/**
2768 * Recalculates the effective operand size.
2769 *
2770 * @param pIemCpu The IEM state.
2771 */
2772static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2773{
2774 switch (pIemCpu->enmCpuMode)
2775 {
2776 case IEMMODE_16BIT:
2777 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2778 break;
2779 case IEMMODE_32BIT:
2780 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2781 break;
2782 case IEMMODE_64BIT:
2783 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2784 {
2785 case 0:
2786 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2787 break;
2788 case IEM_OP_PRF_SIZE_OP:
2789 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2790 break;
2791 case IEM_OP_PRF_SIZE_REX_W:
2792 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2793 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2794 break;
2795 }
2796 break;
2797 default:
2798 AssertFailed();
2799 }
2800}
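
/* Summary (not part of the original source) of the recalculation above:
 *      16-bit mode: operand size prefix (0x66) -> 32-bit, otherwise 16-bit.
 *      32-bit mode: operand size prefix (0x66) -> 16-bit, otherwise 32-bit.
 *      64-bit mode: REX.W -> 64-bit (0x66 is then ignored), 0x66 alone ->
 *                   16-bit, no prefix -> the default size (usually 32-bit;
 *                   some instructions force a 64-bit default via
 *                   iemRecalEffOpSize64Default below). */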
2801
2802
2803/**
2804 * Sets the default operand size to 64-bit and recalculates the effective
2805 * operand size.
2806 *
2807 * @param pIemCpu The IEM state.
2808 */
2809static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2810{
2811 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2812 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2813 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2814 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2815 else
2816 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2817}
2818
2819
2820/*
2821 *
2822 * Common opcode decoders.
2823 * Common opcode decoders.
2824 * Common opcode decoders.
2825 *
2826 */
2827//#include <iprt/mem.h>
2828
2829/**
2830 * Used to add extra details about a stub case.
2831 * @param pIemCpu The IEM per CPU state.
2832 */
2833static void iemOpStubMsg2(PIEMCPU pIemCpu)
2834{
2835#if defined(LOG_ENABLED) && defined(IN_RING3)
2836 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2837 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2838 char szRegs[4096];
2839 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2840 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2841 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2842 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2843 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2844 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2845 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2846 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2847 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2848 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2849 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2850 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2851 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2852 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2853 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2854 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2855 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2856 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2857 " efer=%016VR{efer}\n"
2858 " pat=%016VR{pat}\n"
2859 " sf_mask=%016VR{sf_mask}\n"
2860 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2861 " lstar=%016VR{lstar}\n"
2862 " star=%016VR{star} cstar=%016VR{cstar}\n"
2863 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2864 );
2865
2866 char szInstr[256];
2867 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
2868 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2869 szInstr, sizeof(szInstr), NULL);
2870
2871 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2872#else
2873    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
2874#endif
2875}
2876
2877/**
2878 * Complains about a stub.
2879 *
2880 * Two versions of this macro are provided, one for daily use and one for use
2881 * when working on IEM.
2882 */
2883#if 0
2884# define IEMOP_BITCH_ABOUT_STUB() \
2885 do { \
2886 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2887 iemOpStubMsg2(pIemCpu); \
2888 RTAssertPanic(); \
2889 } while (0)
2890#else
2891# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
2892#endif
2893
2894/** Stubs an opcode. */
2895#define FNIEMOP_STUB(a_Name) \
2896 FNIEMOP_DEF(a_Name) \
2897 { \
2898 IEMOP_BITCH_ABOUT_STUB(); \
2899 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2900 } \
2901 typedef int ignore_semicolon
2902
2903/** Stubs an opcode. */
2904#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2905 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2906 { \
2907 IEMOP_BITCH_ABOUT_STUB(); \
2908 NOREF(a_Name0); \
2909 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2910 } \
2911 typedef int ignore_semicolon
2912
2913/** Stubs an opcode which currently should raise \#UD. */
2914#define FNIEMOP_UD_STUB(a_Name) \
2915 FNIEMOP_DEF(a_Name) \
2916 { \
2917 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2918 return IEMOP_RAISE_INVALID_OPCODE(); \
2919 } \
2920 typedef int ignore_semicolon
2921
2922/** Stubs an opcode which currently should raise \#UD. */
2923#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
2924 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2925 { \
2926 NOREF(a_Name0); \
2927 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2928 return IEMOP_RAISE_INVALID_OPCODE(); \
2929 } \
2930 typedef int ignore_semicolon
2931
2932
2933
2934/** @name Register Access.
2935 * @{
2936 */
2937
2938/**
2939 * Gets a reference (pointer) to the specified hidden segment register.
2940 *
2941 * @returns Hidden register reference.
2942 * @param pIemCpu The per CPU data.
2943 * @param iSegReg The segment register.
2944 */
2945static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2946{
2947 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2948 PCPUMSELREG pSReg;
2949 switch (iSegReg)
2950 {
2951 case X86_SREG_ES: pSReg = &pCtx->es; break;
2952 case X86_SREG_CS: pSReg = &pCtx->cs; break;
2953 case X86_SREG_SS: pSReg = &pCtx->ss; break;
2954 case X86_SREG_DS: pSReg = &pCtx->ds; break;
2955 case X86_SREG_FS: pSReg = &pCtx->fs; break;
2956 case X86_SREG_GS: pSReg = &pCtx->gs; break;
2957 default:
2958 AssertFailedReturn(NULL);
2959 }
2960#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2961 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
2962 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
2963#else
2964 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2965#endif
2966 return pSReg;
2967}
2968
2969
2970/**
2971 * Gets a reference (pointer) to the specified segment register (the selector
2972 * value).
2973 *
2974 * @returns Pointer to the selector variable.
2975 * @param pIemCpu The per CPU data.
2976 * @param iSegReg The segment register.
2977 */
2978static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2979{
2980 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2981 switch (iSegReg)
2982 {
2983 case X86_SREG_ES: return &pCtx->es.Sel;
2984 case X86_SREG_CS: return &pCtx->cs.Sel;
2985 case X86_SREG_SS: return &pCtx->ss.Sel;
2986 case X86_SREG_DS: return &pCtx->ds.Sel;
2987 case X86_SREG_FS: return &pCtx->fs.Sel;
2988 case X86_SREG_GS: return &pCtx->gs.Sel;
2989 }
2990 AssertFailedReturn(NULL);
2991}
2992
2993
2994/**
2995 * Fetches the selector value of a segment register.
2996 *
2997 * @returns The selector value.
2998 * @param pIemCpu The per CPU data.
2999 * @param iSegReg The segment register.
3000 */
3001static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
3002{
3003 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3004 switch (iSegReg)
3005 {
3006 case X86_SREG_ES: return pCtx->es.Sel;
3007 case X86_SREG_CS: return pCtx->cs.Sel;
3008 case X86_SREG_SS: return pCtx->ss.Sel;
3009 case X86_SREG_DS: return pCtx->ds.Sel;
3010 case X86_SREG_FS: return pCtx->fs.Sel;
3011 case X86_SREG_GS: return pCtx->gs.Sel;
3012 }
3013 AssertFailedReturn(0xffff);
3014}
3015
3016
3017/**
3018 * Gets a reference (pointer) to the specified general register.
3019 *
3020 * @returns Register reference.
3021 * @param pIemCpu The per CPU data.
3022 * @param iReg The general register.
3023 */
3024static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
3025{
3026 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3027 switch (iReg)
3028 {
3029 case X86_GREG_xAX: return &pCtx->rax;
3030 case X86_GREG_xCX: return &pCtx->rcx;
3031 case X86_GREG_xDX: return &pCtx->rdx;
3032 case X86_GREG_xBX: return &pCtx->rbx;
3033 case X86_GREG_xSP: return &pCtx->rsp;
3034 case X86_GREG_xBP: return &pCtx->rbp;
3035 case X86_GREG_xSI: return &pCtx->rsi;
3036 case X86_GREG_xDI: return &pCtx->rdi;
3037 case X86_GREG_x8: return &pCtx->r8;
3038 case X86_GREG_x9: return &pCtx->r9;
3039 case X86_GREG_x10: return &pCtx->r10;
3040 case X86_GREG_x11: return &pCtx->r11;
3041 case X86_GREG_x12: return &pCtx->r12;
3042 case X86_GREG_x13: return &pCtx->r13;
3043 case X86_GREG_x14: return &pCtx->r14;
3044 case X86_GREG_x15: return &pCtx->r15;
3045 }
3046 AssertFailedReturn(NULL);
3047}
3048
3049
3050/**
3051 * Gets a reference (pointer) to the specified 8-bit general register.
3052 *
3053 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
3054 *
3055 * @returns Register reference.
3056 * @param pIemCpu The per CPU data.
3057 * @param iReg The register.
3058 */
3059static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
3060{
3061 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
3062 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
3063
3064 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
3065 if (iReg >= 4)
3066 pu8Reg++;
3067 return pu8Reg;
3068}
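
/* Worked example (not part of the original source) of the mapping above:
 * without a REX prefix, iReg 0..3 yield AL/CL/DL/BL and iReg 4..7 yield
 * AH/CH/DH/BH, i.e. the second byte of rAX/rCX/rDX/rBX - hence the "iReg & 3"
 * plus the pu8Reg++.  With any REX prefix present, iReg 4..7 address
 * SPL/BPL/SIL/DIL and 8..15 address R8B..R15B directly via iemGRegRef. */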
3069
3070
3071/**
3072 * Fetches the value of an 8-bit general register.
3073 *
3074 * @returns The register value.
3075 * @param pIemCpu The per CPU data.
3076 * @param iReg The register.
3077 */
3078static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
3079{
3080 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
3081 return *pbSrc;
3082}
3083
3084
3085/**
3086 * Fetches the value of a 16-bit general register.
3087 *
3088 * @returns The register value.
3089 * @param pIemCpu The per CPU data.
3090 * @param iReg The register.
3091 */
3092static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
3093{
3094 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
3095}
3096
3097
3098/**
3099 * Fetches the value of a 32-bit general register.
3100 *
3101 * @returns The register value.
3102 * @param pIemCpu The per CPU data.
3103 * @param iReg The register.
3104 */
3105static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
3106{
3107 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
3108}
3109
3110
3111/**
3112 * Fetches the value of a 64-bit general register.
3113 *
3114 * @returns The register value.
3115 * @param pIemCpu The per CPU data.
3116 * @param iReg The register.
3117 */
3118static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
3119{
3120 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
3121}
3122
3123
3124/**
3125 * Checks whether the FPU state is in FXSAVE format or not.
3126 *
3127 * @returns true if it is, false if it's in FNSAVE.
3128 * @param   pIemCpu             The IEM per CPU data.
3129 */
3130DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
3131{
3132#ifdef RT_ARCH_AMD64
3133 NOREF(pIemCpu);
3134 return true;
3135#else
3136 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
3137 return true;
3138#endif
3139}
3140
3141
3142/**
3143 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
3144 *
3145 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3146 * segment limit.
3147 *
3148 * @param pIemCpu The per CPU data.
3149 * @param offNextInstr The offset of the next instruction.
3150 */
3151static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
3152{
3153 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3154 switch (pIemCpu->enmEffOpSize)
3155 {
3156 case IEMMODE_16BIT:
3157 {
3158 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3159 if ( uNewIp > pCtx->cs.u32Limit
3160 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3161 return iemRaiseGeneralProtectionFault0(pIemCpu);
3162 pCtx->rip = uNewIp;
3163 break;
3164 }
3165
3166 case IEMMODE_32BIT:
3167 {
3168 Assert(pCtx->rip <= UINT32_MAX);
3169 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3170
3171 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3172 if (uNewEip > pCtx->cs.u32Limit)
3173 return iemRaiseGeneralProtectionFault0(pIemCpu);
3174 pCtx->rip = uNewEip;
3175 break;
3176 }
3177
3178 case IEMMODE_64BIT:
3179 {
3180 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3181
3182 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3183 if (!IEM_IS_CANONICAL(uNewRip))
3184 return iemRaiseGeneralProtectionFault0(pIemCpu);
3185 pCtx->rip = uNewRip;
3186 break;
3187 }
3188
3189 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3190 }
3191
3192 return VINF_SUCCESS;
3193}
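
/* Worked example (not part of the original source): pIemCpu->offOpcode holds
 * the length of the instruction decoded so far, so for a two byte "JMP short
 * +5" at IP=0x0100 the code above computes uNewIp = 0x0100 + 5 + 2 = 0x0107,
 * i.e. the displacement is applied relative to the next instruction. */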
3194
3195
3196/**
3197 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
3198 *
3199 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3200 * segment limit.
3201 *
3202 * @returns Strict VBox status code.
3203 * @param pIemCpu The per CPU data.
3204 * @param offNextInstr The offset of the next instruction.
3205 */
3206static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
3207{
3208 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3209 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
3210
3211 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3212 if ( uNewIp > pCtx->cs.u32Limit
3213 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3214 return iemRaiseGeneralProtectionFault0(pIemCpu);
3215 /** @todo Test 16-bit jump in 64-bit mode. */
3216 pCtx->rip = uNewIp;
3217
3218 return VINF_SUCCESS;
3219}
3220
3221
3222/**
3223 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
3224 *
3225 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3226 * segment limit.
3227 *
3228 * @returns Strict VBox status code.
3229 * @param pIemCpu The per CPU data.
3230 * @param offNextInstr The offset of the next instruction.
3231 */
3232static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
3233{
3234 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3235 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
3236
3237 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
3238 {
3239 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3240
3241 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3242 if (uNewEip > pCtx->cs.u32Limit)
3243 return iemRaiseGeneralProtectionFault0(pIemCpu);
3244 pCtx->rip = uNewEip;
3245 }
3246 else
3247 {
3248 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3249
3250 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3251 if (!IEM_IS_CANONICAL(uNewRip))
3252 return iemRaiseGeneralProtectionFault0(pIemCpu);
3253 pCtx->rip = uNewRip;
3254 }
3255 return VINF_SUCCESS;
3256}
3257
3258
3259/**
3260 * Performs a near jump to the specified address.
3261 *
3262 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3263 * segment limit.
3264 *
3265 * @param pIemCpu The per CPU data.
3266 * @param uNewRip The new RIP value.
3267 */
3268static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
3269{
3270 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3271 switch (pIemCpu->enmEffOpSize)
3272 {
3273 case IEMMODE_16BIT:
3274 {
3275 Assert(uNewRip <= UINT16_MAX);
3276 if ( uNewRip > pCtx->cs.u32Limit
3277 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3278 return iemRaiseGeneralProtectionFault0(pIemCpu);
3279 /** @todo Test 16-bit jump in 64-bit mode. */
3280 pCtx->rip = uNewRip;
3281 break;
3282 }
3283
3284 case IEMMODE_32BIT:
3285 {
3286 Assert(uNewRip <= UINT32_MAX);
3287 Assert(pCtx->rip <= UINT32_MAX);
3288 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3289
3290 if (uNewRip > pCtx->cs.u32Limit)
3291 return iemRaiseGeneralProtectionFault0(pIemCpu);
3292 pCtx->rip = uNewRip;
3293 break;
3294 }
3295
3296 case IEMMODE_64BIT:
3297 {
3298 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3299
3300 if (!IEM_IS_CANONICAL(uNewRip))
3301 return iemRaiseGeneralProtectionFault0(pIemCpu);
3302 pCtx->rip = uNewRip;
3303 break;
3304 }
3305
3306 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3307 }
3308
3309 return VINF_SUCCESS;
3310}
3311
3312
3313/**
3314 * Gets the address of the top of the stack.
3315 *
3316 * @param pCtx The CPU context which SP/ESP/RSP should be
3317 * read.
3318 */
3319DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
3320{
3321 if (pCtx->ss.Attr.n.u1Long)
3322 return pCtx->rsp;
3323 if (pCtx->ss.Attr.n.u1DefBig)
3324 return pCtx->esp;
3325 return pCtx->sp;
3326}
3327
3328
3329/**
3330 * Updates the RIP/EIP/IP to point to the next instruction.
3331 *
3332 * @param pIemCpu The per CPU data.
3333 * @param cbInstr The number of bytes to add.
3334 */
3335static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
3336{
3337 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3338 switch (pIemCpu->enmCpuMode)
3339 {
3340 case IEMMODE_16BIT:
3341 Assert(pCtx->rip <= UINT16_MAX);
3342 pCtx->eip += cbInstr;
3343 pCtx->eip &= UINT32_C(0xffff);
3344 break;
3345
3346 case IEMMODE_32BIT:
3347 pCtx->eip += cbInstr;
3348 Assert(pCtx->rip <= UINT32_MAX);
3349 break;
3350
3351 case IEMMODE_64BIT:
3352 pCtx->rip += cbInstr;
3353 break;
3354 default: AssertFailed();
3355 }
3356}
3357
3358
3359/**
3360 * Updates the RIP/EIP/IP to point to the next instruction.
3361 *
3362 * @param pIemCpu The per CPU data.
3363 */
3364static void iemRegUpdateRip(PIEMCPU pIemCpu)
3365{
3366 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3367}
3368
3369
3370/**
3371 * Adds to the stack pointer.
3372 *
3373 * @param pCtx The CPU context which SP/ESP/RSP should be
3374 * updated.
3375 * @param cbToAdd The number of bytes to add.
3376 */
3377DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
3378{
3379 if (pCtx->ss.Attr.n.u1Long)
3380 pCtx->rsp += cbToAdd;
3381 else if (pCtx->ss.Attr.n.u1DefBig)
3382 pCtx->esp += cbToAdd;
3383 else
3384 pCtx->sp += cbToAdd;
3385}
3386
3387
3388/**
3389 * Subtracts from the stack pointer.
3390 *
3391 * @param pCtx The CPU context which SP/ESP/RSP should be
3392 * updated.
3393 * @param cbToSub The number of bytes to subtract.
3394 */
3395DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
3396{
3397 if (pCtx->ss.Attr.n.u1Long)
3398 pCtx->rsp -= cbToSub;
3399 else if (pCtx->ss.Attr.n.u1DefBig)
3400 pCtx->esp -= cbToSub;
3401 else
3402 pCtx->sp -= cbToSub;
3403}
3404
3405
3406/**
3407 * Adds to the temporary stack pointer.
3408 *
3409 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3410 * @param cbToAdd The number of bytes to add.
3411 * @param pCtx Where to get the current stack mode.
3412 */
3413DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint16_t cbToAdd, PCCPUMCTX pCtx)
3414{
3415 if (pCtx->ss.Attr.n.u1Long)
3416 pTmpRsp->u += cbToAdd;
3417 else if (pCtx->ss.Attr.n.u1DefBig)
3418 pTmpRsp->DWords.dw0 += cbToAdd;
3419 else
3420 pTmpRsp->Words.w0 += cbToAdd;
3421}
3422
3423
3424/**
3425 * Subtracts from the temporary stack pointer.
3426 *
3427 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3428 * @param cbToSub The number of bytes to subtract.
3429 * @param pCtx Where to get the current stack mode.
3430 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3431 * expecting that.
3432 */
3433DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint16_t cbToSub, PCCPUMCTX pCtx)
3434{
3435 if (pCtx->ss.Attr.n.u1Long)
3436 pTmpRsp->u -= cbToSub;
3437 else if (pCtx->ss.Attr.n.u1DefBig)
3438 pTmpRsp->DWords.dw0 -= cbToSub;
3439 else
3440 pTmpRsp->Words.w0 -= cbToSub;
3441}
3442
3443
3444/**
3445 * Calculates the effective stack address for a push of the specified size as
3446 * well as the new RSP value (upper bits may be masked).
3447 *
3448 * @returns Effective stack address for the push.
3449 * @param pCtx Where to get the current stack mode.
3450 * @param cbItem The size of the stack item to push.
3451 * @param puNewRsp Where to return the new RSP value.
3452 */
3453DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3454{
3455 RTUINT64U uTmpRsp;
3456 RTGCPTR GCPtrTop;
3457 uTmpRsp.u = pCtx->rsp;
3458
3459 if (pCtx->ss.Attr.n.u1Long)
3460 GCPtrTop = uTmpRsp.u -= cbItem;
3461 else if (pCtx->ss.Attr.n.u1DefBig)
3462 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3463 else
3464 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3465 *puNewRsp = uTmpRsp.u;
3466 return GCPtrTop;
3467}
3468
3469
3470/**
3471 * Gets the current stack pointer and calculates the value after a pop of the
3472 * specified size.
3473 *
3474 * @returns Current stack pointer.
3475 * @param pCtx Where to get the current stack mode.
3476 * @param cbItem The size of the stack item to pop.
3477 * @param puNewRsp Where to return the new RSP value.
3478 */
3479DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3480{
3481 RTUINT64U uTmpRsp;
3482 RTGCPTR GCPtrTop;
3483 uTmpRsp.u = pCtx->rsp;
3484
3485 if (pCtx->ss.Attr.n.u1Long)
3486 {
3487 GCPtrTop = uTmpRsp.u;
3488 uTmpRsp.u += cbItem;
3489 }
3490 else if (pCtx->ss.Attr.n.u1DefBig)
3491 {
3492 GCPtrTop = uTmpRsp.DWords.dw0;
3493 uTmpRsp.DWords.dw0 += cbItem;
3494 }
3495 else
3496 {
3497 GCPtrTop = uTmpRsp.Words.w0;
3498 uTmpRsp.Words.w0 += cbItem;
3499 }
3500 *puNewRsp = uTmpRsp.u;
3501 return GCPtrTop;
3502}
3503
3504
3505/**
3506 * Calculates the effective stack address for a push of the specified size as
3507 * well as the new temporary RSP value (upper bits may be masked).
3508 *
3509 * @returns Effective stack address for the push.
3510 * @param pTmpRsp The temporary stack pointer. This is updated.
3511 * @param cbItem The size of the stack item to push.
3512 * @param pCtx Where to get the current stack mode.
3513 */
3514DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3515{
3516 RTGCPTR GCPtrTop;
3517
3518 if (pCtx->ss.Attr.n.u1Long)
3519 GCPtrTop = pTmpRsp->u -= cbItem;
3520 else if (pCtx->ss.Attr.n.u1DefBig)
3521 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3522 else
3523 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3524 return GCPtrTop;
3525}
3526
3527
3528/**
3529 * Gets the effective stack address for a pop of the specified size and
3530 * calculates and updates the temporary RSP.
3531 *
3532 * @returns Current stack pointer.
3533 * @param pTmpRsp The temporary stack pointer. This is updated.
3534 * @param pCtx Where to get the current stack mode.
3535 * @param cbItem The size of the stack item to pop.
3536 */
3537DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3538{
3539 RTGCPTR GCPtrTop;
3540 if (pCtx->ss.Attr.n.u1Long)
3541 {
3542 GCPtrTop = pTmpRsp->u;
3543 pTmpRsp->u += cbItem;
3544 }
3545 else if (pCtx->ss.Attr.n.u1DefBig)
3546 {
3547 GCPtrTop = pTmpRsp->DWords.dw0;
3548 pTmpRsp->DWords.dw0 += cbItem;
3549 }
3550 else
3551 {
3552 GCPtrTop = pTmpRsp->Words.w0;
3553 pTmpRsp->Words.w0 += cbItem;
3554 }
3555 return GCPtrTop;
3556}
3557
3558
3559/**
3560 * Checks if an Intel CPUID feature bit is set.
3561 *
3562 * @returns true / false.
3563 *
3564 * @param pIemCpu The IEM per CPU data.
3565 * @param fEdx The EDX bit to test, or 0 if ECX.
3566 * @param fEcx The ECX bit to test, or 0 if EDX.
3567 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3568 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3569 */
3570static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3571{
3572 uint32_t uEax, uEbx, uEcx, uEdx;
3573 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3574 return (fEcx && (uEcx & fEcx))
3575 || (fEdx && (uEdx & fEdx));
3576}
3577
3578
3579/**
3580 * Checks if an AMD CPUID feature bit is set.
3581 *
3582 * @returns true / false.
3583 *
3584 * @param pIemCpu The IEM per CPU data.
3585 * @param fEdx The EDX bit to test, or 0 if ECX.
3586 * @param fEcx The ECX bit to test, or 0 if EDX.
3587 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3588 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3589 */
3590static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3591{
3592 uint32_t uEax, uEbx, uEcx, uEdx;
3593 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3594 return (fEcx && (uEcx & fEcx))
3595 || (fEdx && (uEdx & fEdx));
3596}
3597
3598/** @} */
3599
3600
3601/** @name FPU access and helpers.
3602 *
3603 * @{
3604 */
3605
3606
3607/**
3608 * Hook for preparing to use the host FPU.
3609 *
3610 * This is necessary in ring-0 and raw-mode context.
3611 *
3612 * @param pIemCpu The IEM per CPU data.
3613 */
3614DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3615{
3616#ifdef IN_RING3
3617 NOREF(pIemCpu);
3618#else
3619/** @todo RZ: FIXME */
3620//# error "Implement me"
3621#endif
3622}
3623
3624
3625/**
3626 * Stores a QNaN value into a FPU register.
3627 *
3628 * @param pReg Pointer to the register.
3629 */
3630DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3631{
3632 pReg->au32[0] = UINT32_C(0x00000000);
3633 pReg->au32[1] = UINT32_C(0xc0000000);
3634 pReg->au16[4] = UINT16_C(0xffff);
3635}
3636
3637
3638/**
3639 * Updates the FOP, FPU.CS and FPUIP registers.
3640 *
3641 * @param pIemCpu The IEM per CPU data.
3642 * @param pCtx The CPU context.
3643 */
3644DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3645{
3646 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3647 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3648 /** @todo FPU.CS and FPUIP need to be kept separately. */
3649 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3650 {
3651 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
3652 * are handled in real mode, based on the fnsave and fnstenv images. */
3653 pCtx->fpu.CS = 0;
3654 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
3655 }
3656 else
3657 {
3658 pCtx->fpu.CS = pCtx->cs.Sel;
3659 pCtx->fpu.FPUIP = pCtx->rip;
3660 }
3661}
3662
3663
3664/**
3665 * Updates the FPU.DS and FPUDP registers.
3666 *
3667 * @param pIemCpu The IEM per CPU data.
3668 * @param pCtx The CPU context.
3669 * @param iEffSeg The effective segment register.
3670 * @param GCPtrEff The effective address relative to @a iEffSeg.
3671 */
3672DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3673{
3674 RTSEL sel;
3675 switch (iEffSeg)
3676 {
3677 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
3678 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
3679 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
3680 case X86_SREG_ES: sel = pCtx->es.Sel; break;
3681 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
3682 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
3683 default:
3684 AssertMsgFailed(("%d\n", iEffSeg));
3685 sel = pCtx->ds.Sel;
3686 }
3687 /** @todo FPU.DS and FPUDP need to be kept separately. */
3688 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3689 {
3690 pCtx->fpu.DS = 0;
3691 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
3692 }
3693 else
3694 {
3695 pCtx->fpu.DS = sel;
3696 pCtx->fpu.FPUDP = GCPtrEff;
3697 }
3698}
3699
3700
3701/**
3702 * Rotates the stack registers in the push direction.
3703 *
3704 * @param pCtx The CPU context.
3705 * @remarks This is a complete waste of time, but fxsave stores the registers in
3706 * stack order.
3707 */
3708DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
3709{
3710 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
3711 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
3712 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
3713 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
3714 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
3715 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
3716 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
3717 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
3718 pCtx->fpu.aRegs[0].r80 = r80Tmp;
3719}
3720
3721
3722/**
3723 * Rotates the stack registers in the pop direction.
3724 *
3725 * @param pCtx The CPU context.
3726 * @remarks This is a complete waste of time, but fxsave stores the registers in
3727 * stack order.
3728 */
3729DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
3730{
3731 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
3732 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
3733 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
3734 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
3735 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
3736 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
3737 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
3738 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
3739 pCtx->fpu.aRegs[7].r80 = r80Tmp;
3740}
3741
3742
3743/**
3744 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
3745 * exception prevents it.
3746 *
3747 * @param pIemCpu The IEM per CPU data.
3748 * @param pResult The FPU operation result to push.
3749 * @param pCtx The CPU context.
3750 */
3751static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
3752{
3753 /* Update FSW and bail if there are pending exceptions afterwards. */
3754 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3755 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3756 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3757 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3758 {
3759 pCtx->fpu.FSW = fFsw;
3760 return;
3761 }
3762
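    /* Calculate the new TOP: adding 7 modulo 8 is the same as subtracting one,
       i.e. this is the register slot the push will land in. */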
3763 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3764 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3765 {
3766 /* All is fine, push the actual value. */
3767 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3768 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3769 }
3770 else if (pCtx->fpu.FCW & X86_FCW_IM)
3771 {
3772 /* Masked stack overflow, push QNaN. */
3773 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3774 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3775 }
3776 else
3777 {
3778 /* Raise stack overflow, don't push anything. */
3779 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3780 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3781 return;
3782 }
3783
3784 fFsw &= ~X86_FSW_TOP_MASK;
3785 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3786 pCtx->fpu.FSW = fFsw;
3787
3788 iemFpuRotateStackPush(pCtx);
3789}
3790
3791
3792/**
3793 * Stores a result in a FPU register and updates the FSW and FTW.
3794 *
3795 * @param pIemCpu The IEM per CPU data.
3796 * @param pResult The result to store.
3797 * @param iStReg Which FPU register to store it in.
3798 * @param pCtx The CPU context.
3799 */
3800static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
3801{
3802 Assert(iStReg < 8);
3803 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3804 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3805 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
3806 pCtx->fpu.FTW |= RT_BIT(iReg);
3807 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
3808}
3809
3810
3811/**
3812 * Only updates the FPU status word (FSW) with the result of the current
3813 * instruction.
3814 *
3815 * @param pCtx The CPU context.
3816 * @param u16FSW The FSW output of the current instruction.
3817 */
3818static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
3819{
3820 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3821 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
3822}
3823
3824
3825/**
3826 * Pops one item off the FPU stack if no pending exception prevents it.
3827 *
3828 * @param pCtx The CPU context.
3829 */
3830static void iemFpuMaybePopOne(PCPUMCTX pCtx)
3831{
3832 /* Check pending exceptions. */
3833 uint16_t uFSW = pCtx->fpu.FSW;
3834 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3835 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3836 return;
3837
3838 /* TOP++, i.e. pop one item (adding 9 is +1 modulo 8). */
3839 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
3840 uFSW &= ~X86_FSW_TOP_MASK;
3841 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3842 pCtx->fpu.FSW = uFSW;
3843
3844 /* Mark the previous ST0 as empty. */
3845 iOldTop >>= X86_FSW_TOP_SHIFT;
3846 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
3847
3848 /* Rotate the registers. */
3849 iemFpuRotateStackPop(pCtx);
3850}
3851
3852
3853/**
3854 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
3855 *
3856 * @param pIemCpu The IEM per CPU data.
3857 * @param pResult The FPU operation result to push.
3858 */
3859static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
3860{
3861 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3862 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3863 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3864}
3865
3866
3867/**
3868 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
3869 * and sets FPUDP and FPUDS.
3870 *
3871 * @param pIemCpu The IEM per CPU data.
3872 * @param pResult The FPU operation result to push.
3873 * @param iEffSeg The effective segment register.
3874 * @param GCPtrEff The effective address relative to @a iEffSeg.
3875 */
3876static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3877{
3878 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3879 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3880 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3881 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3882}
3883
3884
3885/**
3886 * Replace ST0 with the first value and push the second onto the FPU stack,
3887 * unless a pending exception prevents it.
3888 *
3889 * @param pIemCpu The IEM per CPU data.
3890 * @param pResult The FPU operation result to store and push.
3891 */
3892static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
3893{
3894 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3895 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3896
3897 /* Update FSW and bail if there are pending exceptions afterwards. */
3898 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3899 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3900 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3901 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3902 {
3903 pCtx->fpu.FSW = fFsw;
3904 return;
3905 }
3906
3907 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3908 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3909 {
3910 /* All is fine, push the actual value. */
3911 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3912 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
3913 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
3914 }
3915 else if (pCtx->fpu.FCW & X86_FCW_IM)
3916 {
3917 /* Masked stack overflow, push QNaN. */
3918 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3919 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
3920 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3921 }
3922 else
3923 {
3924 /* Raise stack overflow, don't push anything. */
3925 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3926 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3927 return;
3928 }
3929
3930 fFsw &= ~X86_FSW_TOP_MASK;
3931 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3932 pCtx->fpu.FSW = fFsw;
3933
3934 iemFpuRotateStackPush(pCtx);
3935}
3936
3937
3938/**
3939 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3940 * FOP.
3941 *
3942 * @param pIemCpu The IEM per CPU data.
3943 * @param pResult The result to store.
3944 * @param iStReg Which FPU register to store it in.
3946 */
3947static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3948{
3949 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3950 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3951 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3952}
3953
3954
3955/**
3956 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3957 * FOP, and then pops the stack.
3958 *
3959 * @param pIemCpu The IEM per CPU data.
3960 * @param pResult The result to store.
3961 * @param iStReg Which FPU register to store it in.
3963 */
3964static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3965{
3966 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3967 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3968 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3969 iemFpuMaybePopOne(pCtx);
3970}
3971
3972
3973/**
3974 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3975 * FPUDP, and FPUDS.
3976 *
3977 * @param pIemCpu The IEM per CPU data.
3978 * @param pResult The result to store.
3979 * @param iStReg Which FPU register to store it in.
3981 * @param iEffSeg The effective memory operand selector register.
3982 * @param GCPtrEff The effective memory operand offset.
3983 */
3984static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3985{
3986 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3987 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
3988 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3989 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3990}
3991
3992
3993/**
3994 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3995 * FPUDP, and FPUDS, and then pops the stack.
3996 *
3997 * @param pIemCpu The IEM per CPU data.
3998 * @param pResult The result to store.
3999 * @param iStReg Which FPU register to store it in.
4001 * @param iEffSeg The effective memory operand selector register.
4002 * @param GCPtrEff The effective memory operand offset.
4003 */
4004static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
4005 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4006{
4007 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4008 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4009 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4010 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4011 iemFpuMaybePopOne(pCtx);
4012}
4013
4014
4015/**
4016 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4017 *
4018 * @param pIemCpu The IEM per CPU data.
4019 */
4020static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
4021{
4022 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
4023}
4024
4025
4026/**
4027 * Marks the specified stack register as free (for FFREE).
4028 *
4029 * @param pIemCpu The IEM per CPU data.
4030 * @param iStReg The register to free.
4031 */
4032static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
4033{
4034 Assert(iStReg < 8);
4035 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4036 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4037 pCtx->fpu.FTW &= ~RT_BIT(iReg);
4038}
4039
4040
4041/**
4042 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
4043 *
4044 * @param pIemCpu The IEM per CPU data.
4045 */
4046static void iemFpuStackIncTop(PIEMCPU pIemCpu)
4047{
4048 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4049 uint16_t uFsw = pCtx->fpu.FSW;
4050 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4051 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4052 uFsw &= ~X86_FSW_TOP_MASK;
4053 uFsw |= uTop;
4054 pCtx->fpu.FSW = uFsw;
4055}
4056
4057
4058/**
4059 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
4060 *
4061 * @param pIemCpu The IEM per CPU data.
4062 */
4063static void iemFpuStackDecTop(PIEMCPU pIemCpu)
4064{
4065 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4066 uint16_t uFsw = pCtx->fpu.FSW;
4067 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4068 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4069 uFsw &= ~X86_FSW_TOP_MASK;
4070 uFsw |= uTop;
4071 pCtx->fpu.FSW = uFsw;
4072}
4073
4074
4075/**
4076 * Updates the FSW, FOP, FPUIP, and FPUCS.
4077 *
4078 * @param pIemCpu The IEM per CPU data.
4079 * @param u16FSW The FSW from the current instruction.
4080 */
4081static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
4082{
4083 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4084 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4085 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4086}
4087
4088
4089/**
4090 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4091 *
4092 * @param pIemCpu The IEM per CPU data.
4093 * @param u16FSW The FSW from the current instruction.
4094 */
4095static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4096{
4097 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4098 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4099 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4100 iemFpuMaybePopOne(pCtx);
4101}
4102
4103
4104/**
4105 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4106 *
4107 * @param pIemCpu The IEM per CPU data.
4108 * @param u16FSW The FSW from the current instruction.
4109 * @param iEffSeg The effective memory operand selector register.
4110 * @param GCPtrEff The effective memory operand offset.
4111 */
4112static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4113{
4114 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4115 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4116 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4117 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4118}
4119
4120
4121/**
4122 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4123 *
4124 * @param pIemCpu The IEM per CPU data.
4125 * @param u16FSW The FSW from the current instruction.
4126 */
4127static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4128{
4129 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4130 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4131 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4132 iemFpuMaybePopOne(pCtx);
4133 iemFpuMaybePopOne(pCtx);
4134}
4135
4136
4137/**
4138 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4139 *
4140 * @param pIemCpu The IEM per CPU data.
4141 * @param u16FSW The FSW from the current instruction.
4142 * @param iEffSeg The effective memory operand selector register.
4143 * @param GCPtrEff The effective memory operand offset.
4144 */
4145static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4146{
4147 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4148 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4149 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4150 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4151 iemFpuMaybePopOne(pCtx);
4152}
4153
4154
4155/**
4156 * Worker routine for raising an FPU stack underflow exception.
4157 *
4158 * @param pIemCpu The IEM per CPU data.
4159 * @param iStReg The stack register being accessed.
4160 * @param pCtx The CPU context.
4161 */
4162static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
4163{
4164 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4165 if (pCtx->fpu.FCW & X86_FCW_IM)
4166 {
4167 /* Masked underflow. */
4168 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4169 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4170 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4171 if (iStReg != UINT8_MAX)
4172 {
4173 pCtx->fpu.FTW |= RT_BIT(iReg);
4174 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4175 }
4176 }
4177 else
4178 {
4179 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4180 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4181 }
4182}
4183
4184
4185/**
4186 * Raises a FPU stack underflow exception.
4187 *
4188 * @param pIemCpu The IEM per CPU data.
4189 * @param iStReg The destination register that should be loaded
4190 * with QNaN if \#IS is not masked. Specify
4191 * UINT8_MAX if none (like for fcom).
4192 */
4193DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
4194{
4195 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4196 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4197 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4198}
4199
4200
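/**
 * Raises a FPU stack underflow exception, recording the memory operand in
 * FPUDP and FPUDS first.
 *
 * @param   pIemCpu     The IEM per CPU data.
 * @param   iStReg      The destination register that should be loaded with QNaN
 *                      if \#IS is not masked. UINT8_MAX if none.
 * @param   iEffSeg     The effective memory operand selector register.
 * @param   GCPtrEff    The effective memory operand offset.
 */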
4201DECL_NO_INLINE(static, void)
4202iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4203{
4204 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4205 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4206 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4207 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4208}
4209
4210
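/**
 * Raises a FPU stack underflow exception, then pops the stack.
 *
 * @param   pIemCpu     The IEM per CPU data.
 * @param   iStReg      The destination register that should be loaded with QNaN
 *                      if \#IS is not masked. UINT8_MAX if none.
 */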
4211DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
4212{
4213 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4214 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4215 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4216 iemFpuMaybePopOne(pCtx);
4217}
4218
4219
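/**
 * Raises a FPU stack underflow exception, records the memory operand in FPUDP
 * and FPUDS, then pops the stack.
 *
 * @param   pIemCpu     The IEM per CPU data.
 * @param   iStReg      The destination register that should be loaded with QNaN
 *                      if \#IS is not masked. UINT8_MAX if none.
 * @param   iEffSeg     The effective memory operand selector register.
 * @param   GCPtrEff    The effective memory operand offset.
 */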
4220DECL_NO_INLINE(static, void)
4221iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4222{
4223 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4224 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4225 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4226 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4227 iemFpuMaybePopOne(pCtx);
4228}
4229
4230
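/**
 * Raises a FPU stack underflow exception, then pops the stack twice.
 *
 * @param   pIemCpu     The IEM per CPU data.
 */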
4231DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
4232{
4233 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4234 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4235 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
4236 iemFpuMaybePopOne(pCtx);
4237 iemFpuMaybePopOne(pCtx);
4238}
4239
4240
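/**
 * Raises a FPU stack underflow exception for an instruction that pushes a
 * result; a QNaN is pushed instead if \#IS is masked.
 *
 * @param   pIemCpu     The IEM per CPU data.
 */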
4241DECL_NO_INLINE(static, void)
4242iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
4243{
4244 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4245 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4246
4247 if (pCtx->fpu.FCW & X86_FCW_IM)
4248 {
4249 /* Masked underflow - push a QNaN. */
4250 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4251 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4252 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4253 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4254 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4255 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4256 iemFpuRotateStackPush(pCtx);
4257 }
4258 else
4259 {
4260 /* Exception pending - don't change TOP or the register stack. */
4261 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4262 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4263 }
4264}
4265
4266
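/**
 * Raises a FPU stack underflow exception for an instruction producing two
 * results; QNaNs are stored in ST0 and pushed instead if \#IS is masked.
 *
 * @param   pIemCpu     The IEM per CPU data.
 */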
4267DECL_NO_INLINE(static, void)
4268iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
4269{
4270 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4271 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4272
4273 if (pCtx->fpu.FCW & X86_FCW_IM)
4274 {
4275 /* Masked underflow - push a QNaN. */
4276 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4277 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4278 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4279 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4280 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4281 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4282 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4283 iemFpuRotateStackPush(pCtx);
4284 }
4285 else
4286 {
4287 /* Exception pending - don't change TOP or the register stack. */
4288 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4289 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4290 }
4291}
4292
4293
4294/**
4295 * Worker routine for raising an FPU stack overflow exception on a push.
4296 *
4297 * @param pIemCpu The IEM per CPU data.
4298 * @param pCtx The CPU context.
4299 */
4300static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4301{
4302 if (pCtx->fpu.FCW & X86_FCW_IM)
4303 {
4304 /* Masked overflow. */
4305 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4306 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4307 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4308 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4309 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4310 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4311 iemFpuRotateStackPush(pCtx);
4312 }
4313 else
4314 {
4315 /* Exception pending - don't change TOP or the register stack. */
4316 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4317 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4318 }
4319}
4320
4321
4322/**
4323 * Raises a FPU stack overflow exception on a push.
4324 *
4325 * @param pIemCpu The IEM per CPU data.
4326 */
4327DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4328{
4329 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4330 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4331 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4332}
4333
4334
4335/**
4336 * Raises a FPU stack overflow exception on a push with a memory operand.
4337 *
4338 * @param pIemCpu The IEM per CPU data.
4339 * @param iEffSeg The effective memory operand selector register.
4340 * @param GCPtrEff The effective memory operand offset.
4341 */
4342DECL_NO_INLINE(static, void)
4343iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4344{
4345 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4346 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4347 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4348 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4349}
4350
4351
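/**
 * Checks whether the given FPU stack register is marked as non-empty in FTW.
 *
 * @returns VINF_SUCCESS if the register is in use, VERR_NOT_FOUND if empty.
 * @param   pIemCpu     The IEM per CPU data.
 * @param   iStReg      The stack register (relative to TOP) to check.
 */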
4352static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4353{
4354 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4355 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4356 if (pCtx->fpu.FTW & RT_BIT(iReg))
4357 return VINF_SUCCESS;
4358 return VERR_NOT_FOUND;
4359}
4360
4361
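/**
 * Checks whether the given FPU stack register is non-empty, returning a
 * pointer to its 80-bit value if so.
 *
 * @returns VINF_SUCCESS and *ppRef on success, VERR_NOT_FOUND if empty.
 * @param   pIemCpu     The IEM per CPU data.
 * @param   iStReg      The stack register (relative to TOP) to check.
 * @param   ppRef       Where to return the pointer to the register value.
 */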
4362static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4363{
4364 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4365 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4366 if (pCtx->fpu.FTW & RT_BIT(iReg))
4367 {
4368 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4369 return VINF_SUCCESS;
4370 }
4371 return VERR_NOT_FOUND;
4372}
4373
4374
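/**
 * Checks that both of the given FPU stack registers are non-empty, returning
 * pointers to their 80-bit values if so.
 *
 * @returns VINF_SUCCESS and both references on success, VERR_NOT_FOUND if
 *          either register is empty.
 * @param   pIemCpu     The IEM per CPU data.
 * @param   iStReg0     The first stack register (relative to TOP).
 * @param   ppRef0      Where to return the pointer to the first value.
 * @param   iStReg1     The second stack register (relative to TOP).
 * @param   ppRef1      Where to return the pointer to the second value.
 */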
4375static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4376 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
4377{
4378 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4379 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4380 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4381 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4382 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4383 {
4384 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4385 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
4386 return VINF_SUCCESS;
4387 }
4388 return VERR_NOT_FOUND;
4389}
4390
4391
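/**
 * Checks that both of the given FPU stack registers are non-empty, returning
 * a pointer to the first one's 80-bit value if so.
 *
 * @returns VINF_SUCCESS and *ppRef0 on success, VERR_NOT_FOUND if either
 *          register is empty.
 * @param   pIemCpu     The IEM per CPU data.
 * @param   iStReg0     The first stack register (relative to TOP).
 * @param   ppRef0      Where to return the pointer to the first value.
 * @param   iStReg1     The second stack register (relative to TOP).
 */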
4392static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
4393{
4394 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4395 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4396 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4397 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4398 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4399 {
4400 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4401 return VINF_SUCCESS;
4402 }
4403 return VERR_NOT_FOUND;
4404}
4405
4406
4407/**
4408 * Updates the FPU exception status after FCW is changed.
4409 *
4410 * @param pCtx The CPU context.
4411 */
4412static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
4413{
4414 uint16_t u16Fsw = pCtx->fpu.FSW;
4415 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
4416 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4417 else
4418 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4419 pCtx->fpu.FSW = u16Fsw;
4420}
4421
4422
4423/**
4424 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4425 *
4426 * @returns The full FTW.
4427 * @param pCtx The CPU state.
4428 */
4429static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
4430{
4431 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
4432 uint16_t u16Ftw = 0;
4433 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4434 for (unsigned iSt = 0; iSt < 8; iSt++)
4435 {
4436 unsigned const iReg = (iSt + iTop) & 7;
4437 if (!(u8Ftw & RT_BIT(iReg)))
4438 u16Ftw |= 3 << (iReg * 2); /* empty */
4439 else
4440 {
4441 uint16_t uTag;
4442 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
4443 if (pr80Reg->s.uExponent == 0x7fff)
4444 uTag = 2; /* Exponent is all 1's => Special. */
4445 else if (pr80Reg->s.uExponent == 0x0000)
4446 {
4447 if (pr80Reg->s.u64Mantissa == 0x0000)
4448 uTag = 1; /* All bits are zero => Zero. */
4449 else
4450 uTag = 2; /* Must be special. */
4451 }
4452 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
4453 uTag = 0; /* Valid. */
4454 else
4455 uTag = 2; /* Must be special. */
4456
4457 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
4458 }
4459 }
4460
4461 return u16Ftw;
4462}
4463
4464
4465/**
4466 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4467 *
4468 * @returns The compressed FTW.
4469 * @param u16FullFtw The full FTW to convert.
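 * @remarks Each 2-bit tag other than 3 (empty) becomes a set bit, e.g. a full
 *          FTW of 0xfffe (only register 0 occupied) yields 0x01.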
4470 */
4471static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
4472{
4473 uint8_t u8Ftw = 0;
4474 for (unsigned i = 0; i < 8; i++)
4475 {
4476 if ((u16FullFtw & 3) != 3 /*empty*/)
4477 u8Ftw |= RT_BIT(i);
4478 u16FullFtw >>= 2;
4479 }
4480
4481 return u8Ftw;
4482}
4483
4484/** @} */
4485
4486
4487/** @name Memory access.
4488 *
4489 * @{
4490 */
4491
4492
4493/**
4494 * Updates the IEMCPU::cbWritten counter if applicable.
4495 *
4496 * @param pIemCpu The IEM per CPU data.
4497 * @param fAccess The access being accounted for.
4498 * @param cbMem The access size.
4499 */
4500DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
4501{
4502 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
4503 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
4504 pIemCpu->cbWritten += (uint32_t)cbMem;
4505}
4506
4507
4508/**
4509 * Checks if the given segment can be written to, raising the appropriate
4510 * exception if not.
4511 *
4512 * @returns VBox strict status code.
4513 *
4514 * @param pIemCpu The IEM per CPU data.
4515 * @param pHid Pointer to the hidden register.
4516 * @param iSegReg The register number.
4517 */
4518static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4519{
4520 if (!pHid->Attr.n.u1Present)
4521 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4522
4523 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4524 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4525 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4526 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
4527
4528 /** @todo DPL/RPL/CPL? */
4529
4530 return VINF_SUCCESS;
4531}
4532
4533
4534/**
4535 * Checks if the given segment can be read from, raising the appropriate
4536 * exception if not.
4537 *
4538 * @returns VBox strict status code.
4539 *
4540 * @param pIemCpu The IEM per CPU data.
4541 * @param pHid Pointer to the hidden register.
4542 * @param iSegReg The register number.
4543 */
4544static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4545{
4546 if (!pHid->Attr.n.u1Present)
4547 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4548
4549 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
4550 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4551 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
4552
4553 /** @todo DPL/RPL/CPL? */
4554
4555 return VINF_SUCCESS;
4556}
4557
4558
4559/**
4560 * Applies the segment limit, base and attributes.
4561 *
4562 * This may raise a \#GP or \#SS.
4563 *
4564 * @returns VBox strict status code.
4565 *
4566 * @param pIemCpu The IEM per CPU data.
4567 * @param fAccess The kind of access which is being performed.
4568 * @param iSegReg The index of the segment register to apply.
4569 * This is UINT8_MAX if none (for IDT, GDT, LDT,
4570 * TSS, ++).
 * @param cbMem The size of the memory access.
4571 * @param pGCPtrMem Pointer to the guest memory address to apply
4572 * segmentation to. Input and output parameter.
4573 */
4574static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
4575 size_t cbMem, PRTGCPTR pGCPtrMem)
4576{
4577 if (iSegReg == UINT8_MAX)
4578 return VINF_SUCCESS;
4579
4580 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
4581 switch (pIemCpu->enmCpuMode)
4582 {
4583 case IEMMODE_16BIT:
4584 case IEMMODE_32BIT:
4585 {
4586 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
4587 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
4588
4589 Assert(pSel->Attr.n.u1Present);
4590 Assert(pSel->Attr.n.u1DescType);
4591 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
4592 {
4593 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4594 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4595 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4596
4597 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4598 {
4599 /** @todo CPL check. */
4600 }
4601
4602 /*
4603 * There are two kinds of data selectors, normal and expand down.
4604 */
4605 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
4606 {
4607 if ( GCPtrFirst32 > pSel->u32Limit
4608 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4609 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4610
4611 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4612 }
4613 else
4614 {
4615 /** @todo implement expand down segments. */
4616 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n"));
4617 }
4618 }
4619 else
4620 {
4621
4622 /*
4623 * A code selector can usually be read through; writing is
4624 * only permitted in real and V8086 mode.
4625 */
4626 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4627 || ( (fAccess & IEM_ACCESS_TYPE_READ)
4628 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
4629 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
4630 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4631
4632 if ( GCPtrFirst32 > pSel->u32Limit
4633 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4634 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4635
4636 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4637 {
4638 /** @todo CPL check. */
4639 }
4640
4641 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4642 }
4643 return VINF_SUCCESS;
4644 }
4645
4646 case IEMMODE_64BIT:
4647 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
4648 *pGCPtrMem += pSel->u64Base;
4649 return VINF_SUCCESS;
4650
4651 default:
4652 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
4653 }
4654}
4655
4656
4657/**
4658 * Translates a virtual address to a physical address and checks if we
4659 * can access the page as specified.
4660 *
4661 * @param pIemCpu The IEM per CPU data.
4662 * @param GCPtrMem The virtual address.
4663 * @param fAccess The intended access.
4664 * @param pGCPhysMem Where to return the physical address.
4665 */
4666static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
4667 PRTGCPHYS pGCPhysMem)
4668{
4669 /** @todo Need a different PGM interface here. We're currently using
4670 * generic / REM interfaces. This won't cut it for R0 & RC. */
4671 RTGCPHYS GCPhys;
4672 uint64_t fFlags;
4673 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
4674 if (RT_FAILURE(rc))
4675 {
4676 /** @todo Check unassigned memory in unpaged mode. */
4677 /** @todo Reserved bits in page tables. Requires new PGM interface. */
4678 *pGCPhysMem = NIL_RTGCPHYS;
4679 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
4680 }
4681
4682 /* If the page is writable and does not have the no-exec bit set, all
4683 access is allowed. Otherwise we'll have to check more carefully... */
4684 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
4685 {
4686 /* Write to read only memory? */
4687 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4688 && !(fFlags & X86_PTE_RW)
4689 && ( pIemCpu->uCpl != 0
4690 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
4691 {
4692 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
4693 *pGCPhysMem = NIL_RTGCPHYS;
4694 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
4695 }
4696
4697 /* Kernel memory accessed by userland? */
4698 if ( !(fFlags & X86_PTE_US)
4699 && pIemCpu->uCpl == 3
4700 && !(fAccess & IEM_ACCESS_WHAT_SYS))
4701 {
4702 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
4703 *pGCPhysMem = NIL_RTGCPHYS;
4704 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
4705 }
4706
4707 /* Executing non-executable memory? */
4708 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
4709 && (fFlags & X86_PTE_PAE_NX)
4710 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
4711 {
4712 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
4713 *pGCPhysMem = NIL_RTGCPHYS;
4714 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
4715 VERR_ACCESS_DENIED);
4716 }
4717 }
4718
4719 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
4720 *pGCPhysMem = GCPhys;
4721 return VINF_SUCCESS;
4722}
4723
4724
4725
4726/**
4727 * Maps a physical page.
4728 *
4729 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
4730 * @param pIemCpu The IEM per CPU data.
4731 * @param GCPhysMem The physical address.
4732 * @param fAccess The intended access.
4733 * @param ppvMem Where to return the mapping address.
4734 * @param pLock The PGM lock.
4735 */
4736static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
4737{
4738#ifdef IEM_VERIFICATION_MODE_FULL
4739 /* Force the alternative path so we can ignore writes. */
4740 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
4741 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4742#endif
4743#ifdef IEM_LOG_MEMORY_WRITES
4744 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4745 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4746#endif
4747#ifdef IEM_VERIFICATION_MODE_MINIMAL
4748 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4749#endif
4750
4751 /** @todo This API may require some improving later. A private deal with PGM
4752 * regarding locking and unlocking needs to be struck. A couple of TLBs
4753 * living in PGM, but with publicly accessible inlined access methods
4754 * could perhaps be an even better solution. */
4755 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
4756 GCPhysMem,
4757 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
4758 pIemCpu->fBypassHandlers,
4759 ppvMem,
4760 pLock);
4761 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
4762 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
4763 return rc;
4764}
4765
4766
4767/**
4768 * Unmap a page previously mapped by iemMemPageMap.
4769 *
4770 * @param pIemCpu The IEM per CPU data.
4771 * @param GCPhysMem The physical address.
4772 * @param fAccess The intended access.
4773 * @param pvMem What iemMemPageMap returned.
4774 * @param pLock The PGM lock.
4775 */
4776DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
4777{
4778 NOREF(pIemCpu);
4779 NOREF(GCPhysMem);
4780 NOREF(fAccess);
4781 NOREF(pvMem);
4782 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
4783}
4784
4785
4786/**
4787 * Looks up a memory mapping entry.
4788 *
4789 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
4790 * @param pIemCpu The IEM per CPU data.
4791 * @param pvMem The memory address.
4792 * @param fAccess The access flags to match (what + type).
4793 */
4794DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4795{
4796 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
4797 if ( pIemCpu->aMemMappings[0].pv == pvMem
4798 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4799 return 0;
4800 if ( pIemCpu->aMemMappings[1].pv == pvMem
4801 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4802 return 1;
4803 if ( pIemCpu->aMemMappings[2].pv == pvMem
4804 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4805 return 2;
4806 return VERR_NOT_FOUND;
4807}
4808
4809
4810/**
4811 * Finds a free memory mapping entry when iNextMapping cannot be used.
4812 *
4813 * @returns Memory mapping index, 1024 on failure.
4814 * @param pIemCpu The IEM per CPU data.
4815 */
4816static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
4817{
4818 /*
4819 * The easy case.
4820 */
4821 if (pIemCpu->cActiveMappings == 0)
4822 {
4823 pIemCpu->iNextMapping = 1;
4824 return 0;
4825 }
4826
4827 /* There should be enough mappings for all instructions. */
4828 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
4829
4830 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
4831 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
4832 return i;
4833
4834 AssertFailedReturn(1024);
4835}
4836
4837
4838/**
4839 * Commits a bounce buffer that needs writing back and unmaps it.
4840 *
4841 * @returns Strict VBox status code.
4842 * @param pIemCpu The IEM per CPU data.
4843 * @param iMemMap The index of the buffer to commit.
4844 */
4845static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
4846{
4847 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
4848 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
4849
4850 /*
4851 * Do the writing.
4852 */
4853 int rc;
4854#ifndef IEM_VERIFICATION_MODE_MINIMAL
4855 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
4856 && !IEM_VERIFICATION_ENABLED(pIemCpu))
4857 {
4858 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4859 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4860 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4861 if (!pIemCpu->fBypassHandlers)
4862 {
4863 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4864 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4865 pbBuf,
4866 cbFirst);
4867 if (cbSecond && rc == VINF_SUCCESS)
4868 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4869 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4870 pbBuf + cbFirst,
4871 cbSecond);
4872 }
4873 else
4874 {
4875 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4876 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4877 pbBuf,
4878 cbFirst);
4879 if (cbSecond && rc == VINF_SUCCESS)
4880 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4881 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4882 pbBuf + cbFirst,
4883 cbSecond);
4884 }
4885 if (rc != VINF_SUCCESS)
4886 {
4887 /** @todo status code handling */
4888 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
4889 pIemCpu->fBypassHandlers ? "PGMPhysSimpleWriteGCPhys" : "PGMPhysWrite",
4890 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
4891 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
4892 }
4893 }
4894 else
4895#endif
4896 rc = VINF_SUCCESS;
4897
4898#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
4899 /*
4900 * Record the write(s).
4901 */
4902 if (!pIemCpu->fNoRem)
4903 {
4904 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4905 if (pEvtRec)
4906 {
4907 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4908 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
4909 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4910 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
4911 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
4912 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4913 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4914 }
4915 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
4916 {
4917 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4918 if (pEvtRec)
4919 {
4920 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4921 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
4922 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4923 memcpy(pEvtRec->u.RamWrite.ab,
4924 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
4925 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
4926 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4927 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4928 }
4929 }
4930 }
4931#endif
4932#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
4933 if (rc == VINF_SUCCESS)
4934 {
4935 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4936 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
4937 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
4938 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4939 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
4940 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
4941
4942 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4943 g_cbIemWrote = cbWrote;
4944 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
4945 }
4946#endif
4947
4948 /*
4949 * Free the mapping entry.
4950 */
4951 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4952 Assert(pIemCpu->cActiveMappings != 0);
4953 pIemCpu->cActiveMappings--;
4954 return rc;
4955}
4956
4957
4958/**
4959 * iemMemMap worker that deals with a request crossing pages.
4960 */
4961static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
4962 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
4963{
4964 /*
4965 * Do the address translations.
4966 */
4967 RTGCPHYS GCPhysFirst;
4968 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
4969 if (rcStrict != VINF_SUCCESS)
4970 return rcStrict;
4971
4972/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
4973 * last byte. */
4974 RTGCPHYS GCPhysSecond;
4975 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
4976 if (rcStrict != VINF_SUCCESS)
4977 return rcStrict;
4978 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
4979
4980 /*
4981 * Read in the current memory content if it's a read, execute or partial
4982 * write access.
4983 */
4984 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4985 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
4986 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
4987
4988 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
4989 {
4990 int rc;
4991 if (!pIemCpu->fBypassHandlers)
4992 {
4993 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
4994 if (rc != VINF_SUCCESS)
4995 {
4996 /** @todo status code handling */
4997 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
4998 return rc;
4999 }
5000 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
5001 if (rc != VINF_SUCCESS)
5002 {
5003 /** @todo status code handling */
5004 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5005 return rc;
5006 }
5007 }
5008 else
5009 {
5010 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
5011 if (rc != VINF_SUCCESS)
5012 {
5013 /** @todo status code handling */
5014 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5015 return rc;
5016 }
5017 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5018 if (rc != VINF_SUCCESS)
5019 {
5020 /** @todo status code handling */
5021 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5022 return rc;
5023 }
5024 }
5025
5026#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5027 if ( !pIemCpu->fNoRem
5028 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5029 {
5030 /*
5031 * Record the reads.
5032 */
5033 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5034 if (pEvtRec)
5035 {
5036 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5037 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5038 pEvtRec->u.RamRead.cb = cbFirstPage;
5039 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5040 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5041 }
5042 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5043 if (pEvtRec)
5044 {
5045 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5046 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
5047 pEvtRec->u.RamRead.cb = cbSecondPage;
5048 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5049 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5050 }
5051 }
5052#endif
5053 }
5054#ifdef VBOX_STRICT
5055 else
5056 memset(pbBuf, 0xcc, cbMem);
5057#endif
5058#ifdef VBOX_STRICT
5059 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5060 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5061#endif
5062
5063 /*
5064 * Commit the bounce buffer entry.
5065 */
5066 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5067 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5068 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5069 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5070 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
5071 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5072 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5073 pIemCpu->cActiveMappings++;
5074
5075 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5076 *ppvMem = pbBuf;
5077 return VINF_SUCCESS;
5078}
5079
5080
5081/**
5082 * iemMemMap worker that deals with iemMemPageMap failures.
5083 */
5084static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5085 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5086{
5087 /*
5088 * Filter out conditions we can handle and the ones which shouldn't happen.
5089 */
5090 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5091 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5092 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5093 {
5094 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
5095 return rcMap;
5096 }
5097 pIemCpu->cPotentialExits++;
5098
5099 /*
5100 * Read in the current memory content if it's a read, execute or partial
5101 * write access.
5102 */
5103 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5104 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5105 {
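        /* Unassigned (open bus) memory reads back as all ones; anything else is
           read via PGM, either through the access handlers or bypassing them as
           requested. */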
5106 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5107 memset(pbBuf, 0xff, cbMem);
5108 else
5109 {
5110 int rc;
5111 if (!pIemCpu->fBypassHandlers)
5112 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
5113 else
5114 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
5115 if (rc != VINF_SUCCESS)
5116 {
5117 /** @todo status code handling */
5118 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
5119                 pIemCpu->fBypassHandlers ? "PGMPhysSimpleReadGCPhys" : "PGMPhysRead", GCPhysFirst, rc));
5120 return rc;
5121 }
5122 }
5123
5124#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5125 if ( !pIemCpu->fNoRem
5126 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5127 {
5128 /*
5129 * Record the read.
5130 */
5131 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5132 if (pEvtRec)
5133 {
5134 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5135 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5136 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
5137 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5138 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5139 }
5140 }
5141#endif
5142 }
5143#ifdef VBOX_STRICT
5144 else
5145 memset(pbBuf, 0xcc, cbMem);
5146#endif
5147#ifdef VBOX_STRICT
5148 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5149 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5150#endif
5151
5152 /*
5153 * Commit the bounce buffer entry.
5154 */
5155 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5156 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5157 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5158 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
5159 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5160 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5161 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5162 pIemCpu->cActiveMappings++;
5163
5164 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5165 *ppvMem = pbBuf;
5166 return VINF_SUCCESS;
5167}
5168
5169
5170
5171/**
5172 * Maps the specified guest memory for the given kind of access.
5173 *
5174 * This may be using bounce buffering of the memory if it's crossing a page
5175 * boundary or if there is an access handler installed for any of it. Because
5176 * of lock prefix guarantees, we're in for some extra clutter when this
5177 * happens.
5178 *
5179 * This may raise a \#GP, \#SS, \#PF or \#AC.
5180 *
5181 * @returns VBox strict status code.
5182 *
5183 * @param pIemCpu The IEM per CPU data.
5184 * @param ppvMem Where to return the pointer to the mapped
5185 * memory.
5186 * @param cbMem The number of bytes to map. This is usually 1,
5187 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5188 * string operations it can be up to a page.
5189 * @param iSegReg The index of the segment register to use for
5190 * this access. The base and limits are checked.
5191 * Use UINT8_MAX to indicate that no segmentation
5192 * is required (for IDT, GDT and LDT accesses).
5193 * @param GCPtrMem The address of the guest memory.
5194 * @param fAccess How the memory is being accessed. The
5195 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5196 * how to map the memory, while the
5197 * IEM_ACCESS_WHAT_XXX bit is used when raising
5198 * exceptions.
5199 */
5200static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
5201{
5202 /*
5203 * Check the input and figure out which mapping entry to use.
5204 */
5205 Assert(cbMem <= 32 || cbMem == 512 || cbMem == 108 || cbMem == 94);
5206    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5207
5208 unsigned iMemMap = pIemCpu->iNextMapping;
5209 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
5210 {
5211 iMemMap = iemMemMapFindFree(pIemCpu);
5212 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
5213 }
5214
5215 /*
5216 * Map the memory, checking that we can actually access it. If something
5217 * slightly complicated happens, fall back on bounce buffering.
5218 */
5219 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5220 if (rcStrict != VINF_SUCCESS)
5221 return rcStrict;
5222
5223 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
5224 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5225
5226 RTGCPHYS GCPhysFirst;
5227 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
5228 if (rcStrict != VINF_SUCCESS)
5229 return rcStrict;
5230
5231 void *pvMem;
5232 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5233 if (rcStrict != VINF_SUCCESS)
5234 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5235
5236 /*
5237 * Fill in the mapping table entry.
5238 */
5239 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
5240 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
5241 pIemCpu->iNextMapping = iMemMap + 1;
5242 pIemCpu->cActiveMappings++;
5243
5244 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5245 *ppvMem = pvMem;
5246 return VINF_SUCCESS;
5247}
5248
5249
5250/**
5251 * Commits the guest memory if bounce buffered and unmaps it.
5252 *
5253 * @returns Strict VBox status code.
5254 * @param pIemCpu The IEM per CPU data.
5255 * @param pvMem The mapping.
5256 * @param fAccess The kind of access.
5257 */
5258static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5259{
5260 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
5261 AssertReturn(iMemMap >= 0, iMemMap);
5262
5263 /* If it's bounce buffered, we may need to write back the buffer. */
5264 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
5265 {
5266 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
5267 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
5268 }
5269 /* Otherwise unlock it. */
5270 else
5271 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5272
5273 /* Free the entry. */
5274 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5275 Assert(pIemCpu->cActiveMappings != 0);
5276 pIemCpu->cActiveMappings--;
5277 return VINF_SUCCESS;
5278}
5279
5280
5281/**
5282 * Fetches a data byte.
5283 *
5284 * @returns Strict VBox status code.
5285 * @param pIemCpu The IEM per CPU data.
5286 * @param pu8Dst Where to return the byte.
5287 * @param iSegReg The index of the segment register to use for
5288 * this access. The base and limits are checked.
5289 * @param GCPtrMem The address of the guest memory.
5290 */
5291static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5292{
5293 /* The lazy approach for now... */
5294 uint8_t const *pu8Src;
5295 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5296 if (rc == VINF_SUCCESS)
5297 {
5298 *pu8Dst = *pu8Src;
5299 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5300 }
5301 return rc;
5302}
5303
5304
5305/**
5306 * Fetches a data word.
5307 *
5308 * @returns Strict VBox status code.
5309 * @param pIemCpu The IEM per CPU data.
5310 * @param pu16Dst Where to return the word.
5311 * @param iSegReg The index of the segment register to use for
5312 * this access. The base and limits are checked.
5313 * @param GCPtrMem The address of the guest memory.
5314 */
5315static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5316{
5317 /* The lazy approach for now... */
5318 uint16_t const *pu16Src;
5319 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5320 if (rc == VINF_SUCCESS)
5321 {
5322 *pu16Dst = *pu16Src;
5323 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
5324 }
5325 return rc;
5326}
5327
5328
5329/**
5330 * Fetches a data dword.
5331 *
5332 * @returns Strict VBox status code.
5333 * @param pIemCpu The IEM per CPU data.
5334 * @param pu32Dst Where to return the dword.
5335 * @param iSegReg The index of the segment register to use for
5336 * this access. The base and limits are checked.
5337 * @param GCPtrMem The address of the guest memory.
5338 */
5339static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5340{
5341 /* The lazy approach for now... */
5342 uint32_t const *pu32Src;
5343 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5344 if (rc == VINF_SUCCESS)
5345 {
5346 *pu32Dst = *pu32Src;
5347 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
5348 }
5349 return rc;
5350}
5351
5352
5353#ifdef SOME_UNUSED_FUNCTION
5354/**
5355 * Fetches a data dword and sign extends it to a qword.
5356 *
5357 * @returns Strict VBox status code.
5358 * @param pIemCpu The IEM per CPU data.
5359 * @param pu64Dst Where to return the sign extended value.
5360 * @param iSegReg The index of the segment register to use for
5361 * this access. The base and limits are checked.
5362 * @param GCPtrMem The address of the guest memory.
5363 */
5364static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5365{
5366 /* The lazy approach for now... */
5367 int32_t const *pi32Src;
5368 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5369 if (rc == VINF_SUCCESS)
5370 {
5371 *pu64Dst = *pi32Src;
5372 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
5373 }
5374#ifdef __GNUC__ /* warning: GCC may be a royal pain */
5375 else
5376 *pu64Dst = 0;
5377#endif
5378 return rc;
5379}
5380#endif
5381
5382
5383/**
5384 * Fetches a data qword.
5385 *
5386 * @returns Strict VBox status code.
5387 * @param pIemCpu The IEM per CPU data.
5388 * @param pu64Dst Where to return the qword.
5389 * @param iSegReg The index of the segment register to use for
5390 * this access. The base and limits are checked.
5391 * @param GCPtrMem The address of the guest memory.
5392 */
5393static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5394{
5395 /* The lazy approach for now... */
5396 uint64_t const *pu64Src;
5397 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5398 if (rc == VINF_SUCCESS)
5399 {
5400 *pu64Dst = *pu64Src;
5401 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5402 }
5403 return rc;
5404}
5405
5406
5407/**
5408 * Fetches a data tword.
5409 *
5410 * @returns Strict VBox status code.
5411 * @param pIemCpu The IEM per CPU data.
5412 * @param pr80Dst Where to return the tword.
5413 * @param iSegReg The index of the segment register to use for
5414 * this access. The base and limits are checked.
5415 * @param GCPtrMem The address of the guest memory.
5416 */
5417static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5418{
5419 /* The lazy approach for now... */
5420 PCRTFLOAT80U pr80Src;
5421 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5422 if (rc == VINF_SUCCESS)
5423 {
5424 *pr80Dst = *pr80Src;
5425 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
5426 }
5427 return rc;
5428}
5429
5430
5431/**
5432 * Fetches a descriptor register (lgdt, lidt).
5433 *
5434 * @returns Strict VBox status code.
5435 * @param pIemCpu The IEM per CPU data.
5436 * @param pcbLimit Where to return the limit.
5437 * @param pGCPtrBase Where to return the base.
5438 * @param iSegReg The index of the segment register to use for
5439 * this access. The base and limits are checked.
5440 * @param GCPtrMem The address of the guest memory.
5441 * @param enmOpSize The effective operand size.
5442 */
5443static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
5444 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5445{
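    /*
     * The memory operand has the same layout SGDT/SIDT stores: a 16-bit limit
     * followed by the base address, of which 3, 4 or 8 bytes are significant
     * depending on the effective operand size.
     */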
5446 uint8_t const *pu8Src;
5447 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5448 (void **)&pu8Src,
5449 enmOpSize == IEMMODE_64BIT
5450 ? 2 + 8
5451 : enmOpSize == IEMMODE_32BIT
5452 ? 2 + 4
5453 : 2 + 3,
5454 iSegReg,
5455 GCPtrMem,
5456 IEM_ACCESS_DATA_R);
5457 if (rcStrict == VINF_SUCCESS)
5458 {
5459 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
5460 switch (enmOpSize)
5461 {
5462 case IEMMODE_16BIT:
5463 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
5464 break;
5465 case IEMMODE_32BIT:
5466 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
5467 break;
5468 case IEMMODE_64BIT:
5469 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
5470 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
5471 break;
5472
5473 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5474 }
5475 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5476 }
5477 return rcStrict;
5478}
5479
5480
5481
5482/**
5483 * Stores a data byte.
5484 *
5485 * @returns Strict VBox status code.
5486 * @param pIemCpu The IEM per CPU data.
5487 * @param iSegReg The index of the segment register to use for
5488 * this access. The base and limits are checked.
5489 * @param GCPtrMem The address of the guest memory.
5490 * @param u8Value The value to store.
5491 */
5492static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
5493{
5494 /* The lazy approach for now... */
5495 uint8_t *pu8Dst;
5496 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5497 if (rc == VINF_SUCCESS)
5498 {
5499 *pu8Dst = u8Value;
5500 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
5501 }
5502 return rc;
5503}
5504
5505
5506/**
5507 * Stores a data word.
5508 *
5509 * @returns Strict VBox status code.
5510 * @param pIemCpu The IEM per CPU data.
5511 * @param iSegReg The index of the segment register to use for
5512 * this access. The base and limits are checked.
5513 * @param GCPtrMem The address of the guest memory.
5514 * @param u16Value The value to store.
5515 */
5516static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
5517{
5518 /* The lazy approach for now... */
5519 uint16_t *pu16Dst;
5520 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5521 if (rc == VINF_SUCCESS)
5522 {
5523 *pu16Dst = u16Value;
5524 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
5525 }
5526 return rc;
5527}
5528
5529
5530/**
5531 * Stores a data dword.
5532 *
5533 * @returns Strict VBox status code.
5534 * @param pIemCpu The IEM per CPU data.
5535 * @param iSegReg The index of the segment register to use for
5536 * this access. The base and limits are checked.
5537 * @param GCPtrMem The address of the guest memory.
5538 * @param u32Value The value to store.
5539 */
5540static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
5541{
5542 /* The lazy approach for now... */
5543 uint32_t *pu32Dst;
5544 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5545 if (rc == VINF_SUCCESS)
5546 {
5547 *pu32Dst = u32Value;
5548 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
5549 }
5550 return rc;
5551}
5552
5553
5554/**
5555 * Stores a data qword.
5556 *
5557 * @returns Strict VBox status code.
5558 * @param pIemCpu The IEM per CPU data.
5559 * @param iSegReg The index of the segment register to use for
5560 * this access. The base and limits are checked.
5561 * @param GCPtrMem The address of the guest memory.
5562 * @param u64Value The value to store.
5563 */
5564static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
5565{
5566 /* The lazy approach for now... */
5567 uint64_t *pu64Dst;
5568 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5569 if (rc == VINF_SUCCESS)
5570 {
5571 *pu64Dst = u64Value;
5572 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
5573 }
5574 return rc;
5575}
5576
5577
5578/**
5579 * Stores a descriptor register (sgdt, sidt).
5580 *
5581 * @returns Strict VBox status code.
5582 * @param pIemCpu The IEM per CPU data.
5583 * @param cbLimit The limit.
5584 * @param GCPtrBase The base address.
5585 * @param iSegReg The index of the segment register to use for
5586 * this access. The base and limits are checked.
5587 * @param GCPtrMem The address of the guest memory.
5588 * @param enmOpSize The effective operand size.
5589 */
5590static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
5591 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5592{
5593 uint8_t *pu8Src;
5594 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5595 (void **)&pu8Src,
5596 enmOpSize == IEMMODE_64BIT
5597 ? 2 + 8
5598 : enmOpSize == IEMMODE_32BIT
5599 ? 2 + 4
5600 : 2 + 3,
5601 iSegReg,
5602 GCPtrMem,
5603 IEM_ACCESS_DATA_W);
5604 if (rcStrict == VINF_SUCCESS)
5605 {
5606 pu8Src[0] = RT_BYTE1(cbLimit);
5607 pu8Src[1] = RT_BYTE2(cbLimit);
5608 pu8Src[2] = RT_BYTE1(GCPtrBase);
5609 pu8Src[3] = RT_BYTE2(GCPtrBase);
5610 pu8Src[4] = RT_BYTE3(GCPtrBase);
5611 if (enmOpSize == IEMMODE_16BIT)
5612 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
5613 else
5614 {
5615 pu8Src[5] = RT_BYTE4(GCPtrBase);
5616 if (enmOpSize == IEMMODE_64BIT)
5617 {
5618 pu8Src[6] = RT_BYTE5(GCPtrBase);
5619 pu8Src[7] = RT_BYTE6(GCPtrBase);
5620 pu8Src[8] = RT_BYTE7(GCPtrBase);
5621 pu8Src[9] = RT_BYTE8(GCPtrBase);
5622 }
5623 }
5624 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
5625 }
5626 return rcStrict;
5627}
5628
5629
5630/**
5631 * Pushes a word onto the stack.
5632 *
5633 * @returns Strict VBox status code.
5634 * @param pIemCpu The IEM per CPU data.
5635 * @param u16Value The value to push.
5636 */
5637static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
5638{
5639    /* Decrement the stack pointer. */
5640 uint64_t uNewRsp;
5641 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5642 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
5643
5644 /* Write the word the lazy way. */
5645 uint16_t *pu16Dst;
5646 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5647 if (rc == VINF_SUCCESS)
5648 {
5649 *pu16Dst = u16Value;
5650 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5651 }
5652
5653    /* Commit the new RSP value unless an access handler made trouble. */
5654 if (rc == VINF_SUCCESS)
5655 pCtx->rsp = uNewRsp;
5656
5657 return rc;
5658}
5659
5660
5661/**
5662 * Pushes a dword onto the stack.
5663 *
5664 * @returns Strict VBox status code.
5665 * @param pIemCpu The IEM per CPU data.
5666 * @param u32Value The value to push.
5667 */
5668static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
5669{
5670    /* Decrement the stack pointer. */
5671 uint64_t uNewRsp;
5672 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5673 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
5674
5675 /* Write the word the lazy way. */
5676 uint32_t *pu32Dst;
5677 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5678 if (rc == VINF_SUCCESS)
5679 {
5680 *pu32Dst = u32Value;
5681 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5682 }
5683
5684    /* Commit the new RSP value unless an access handler made trouble. */
5685 if (rc == VINF_SUCCESS)
5686 pCtx->rsp = uNewRsp;
5687
5688 return rc;
5689}
5690
5691
5692/**
5693 * Pushes a qword onto the stack.
5694 *
5695 * @returns Strict VBox status code.
5696 * @param pIemCpu The IEM per CPU data.
5697 * @param u64Value The value to push.
5698 */
5699static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
5700{
5701    /* Decrement the stack pointer. */
5702 uint64_t uNewRsp;
5703 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5704 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
5705
5706 /* Write the word the lazy way. */
5707 uint64_t *pu64Dst;
5708 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5709 if (rc == VINF_SUCCESS)
5710 {
5711 *pu64Dst = u64Value;
5712 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5713 }
5714
5715    /* Commit the new RSP value unless an access handler made trouble. */
5716 if (rc == VINF_SUCCESS)
5717 pCtx->rsp = uNewRsp;
5718
5719 return rc;
5720}
5721
5722
5723/**
5724 * Pops a word from the stack.
5725 *
5726 * @returns Strict VBox status code.
5727 * @param pIemCpu The IEM per CPU data.
5728 * @param pu16Value Where to store the popped value.
5729 */
5730static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
5731{
5732 /* Increment the stack pointer. */
5733 uint64_t uNewRsp;
5734 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5735 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
5736
5737    /* Read the word the lazy way. */
5738 uint16_t const *pu16Src;
5739 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5740 if (rc == VINF_SUCCESS)
5741 {
5742 *pu16Value = *pu16Src;
5743 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5744
5745 /* Commit the new RSP value. */
5746 if (rc == VINF_SUCCESS)
5747 pCtx->rsp = uNewRsp;
5748 }
5749
5750 return rc;
5751}
5752
5753
5754/**
5755 * Pops a dword from the stack.
5756 *
5757 * @returns Strict VBox status code.
5758 * @param pIemCpu The IEM per CPU data.
5759 * @param pu32Value Where to store the popped value.
5760 */
5761static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
5762{
5763 /* Increment the stack pointer. */
5764 uint64_t uNewRsp;
5765 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5766 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
5767
5768    /* Read the word the lazy way. */
5769 uint32_t const *pu32Src;
5770 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5771 if (rc == VINF_SUCCESS)
5772 {
5773 *pu32Value = *pu32Src;
5774 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5775
5776 /* Commit the new RSP value. */
5777 if (rc == VINF_SUCCESS)
5778 pCtx->rsp = uNewRsp;
5779 }
5780
5781 return rc;
5782}
5783
5784
5785/**
5786 * Pops a qword from the stack.
5787 *
5788 * @returns Strict VBox status code.
5789 * @param pIemCpu The IEM per CPU data.
5790 * @param pu64Value Where to store the popped value.
5791 */
5792static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
5793{
5794 /* Increment the stack pointer. */
5795 uint64_t uNewRsp;
5796 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5797 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
5798
5799    /* Read the word the lazy way. */
5800 uint64_t const *pu64Src;
5801 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5802 if (rc == VINF_SUCCESS)
5803 {
5804 *pu64Value = *pu64Src;
5805 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5806
5807 /* Commit the new RSP value. */
5808 if (rc == VINF_SUCCESS)
5809 pCtx->rsp = uNewRsp;
5810 }
5811
5812 return rc;
5813}
5814
5815
5816/**
5817 * Pushes a word onto the stack, using a temporary stack pointer.
5818 *
5819 * @returns Strict VBox status code.
5820 * @param pIemCpu The IEM per CPU data.
5821 * @param u16Value The value to push.
5822 * @param pTmpRsp Pointer to the temporary stack pointer.
5823 */
5824static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
5825{
5826    /* Decrement the stack pointer. */
5827 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5828 RTUINT64U NewRsp = *pTmpRsp;
5829 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
5830
5831 /* Write the word the lazy way. */
5832 uint16_t *pu16Dst;
5833 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5834 if (rc == VINF_SUCCESS)
5835 {
5836 *pu16Dst = u16Value;
5837 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5838 }
5839
5840    /* Commit the new RSP value unless an access handler made trouble. */
5841 if (rc == VINF_SUCCESS)
5842 *pTmpRsp = NewRsp;
5843
5844 return rc;
5845}
5846
5847
5848/**
5849 * Pushes a dword onto the stack, using a temporary stack pointer.
5850 *
5851 * @returns Strict VBox status code.
5852 * @param pIemCpu The IEM per CPU data.
5853 * @param u32Value The value to push.
5854 * @param pTmpRsp Pointer to the temporary stack pointer.
5855 */
5856static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
5857{
5858    /* Decrement the stack pointer. */
5859 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5860 RTUINT64U NewRsp = *pTmpRsp;
5861 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
5862
5863 /* Write the word the lazy way. */
5864 uint32_t *pu32Dst;
5865 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5866 if (rc == VINF_SUCCESS)
5867 {
5868 *pu32Dst = u32Value;
5869 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5870 }
5871
5872    /* Commit the new RSP value unless an access handler made trouble. */
5873 if (rc == VINF_SUCCESS)
5874 *pTmpRsp = NewRsp;
5875
5876 return rc;
5877}
5878
5879
5880/**
5881 * Pushes a qword onto the stack, using a temporary stack pointer.
5882 *
5883 * @returns Strict VBox status code.
5884 * @param pIemCpu The IEM per CPU data.
5885 * @param u64Value The value to push.
5886 * @param pTmpRsp Pointer to the temporary stack pointer.
5887 */
5888static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
5889{
5890    /* Decrement the stack pointer. */
5891 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5892 RTUINT64U NewRsp = *pTmpRsp;
5893 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
5894
5895 /* Write the word the lazy way. */
5896 uint64_t *pu64Dst;
5897 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5898 if (rc == VINF_SUCCESS)
5899 {
5900 *pu64Dst = u64Value;
5901 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5902 }
5903
5904    /* Commit the new RSP value unless an access handler made trouble. */
5905 if (rc == VINF_SUCCESS)
5906 *pTmpRsp = NewRsp;
5907
5908 return rc;
5909}
5910
5911
5912/**
5913 * Pops a word from the stack, using a temporary stack pointer.
5914 *
5915 * @returns Strict VBox status code.
5916 * @param pIemCpu The IEM per CPU data.
5917 * @param pu16Value Where to store the popped value.
5918 * @param pTmpRsp Pointer to the temporary stack pointer.
5919 */
5920static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
5921{
5922 /* Increment the stack pointer. */
5923 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5924 RTUINT64U NewRsp = *pTmpRsp;
5925 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
5926
5927    /* Read the word the lazy way. */
5928 uint16_t const *pu16Src;
5929 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5930 if (rc == VINF_SUCCESS)
5931 {
5932 *pu16Value = *pu16Src;
5933 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5934
5935 /* Commit the new RSP value. */
5936 if (rc == VINF_SUCCESS)
5937 *pTmpRsp = NewRsp;
5938 }
5939
5940 return rc;
5941}
5942
5943
5944/**
5945 * Pops a dword from the stack, using a temporary stack pointer.
5946 *
5947 * @returns Strict VBox status code.
5948 * @param pIemCpu The IEM per CPU data.
5949 * @param pu32Value Where to store the popped value.
5950 * @param pTmpRsp Pointer to the temporary stack pointer.
5951 */
5952static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
5953{
5954 /* Increment the stack pointer. */
5955 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5956 RTUINT64U NewRsp = *pTmpRsp;
5957 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
5958
5959    /* Read the word the lazy way. */
5960 uint32_t const *pu32Src;
5961 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5962 if (rc == VINF_SUCCESS)
5963 {
5964 *pu32Value = *pu32Src;
5965 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5966
5967 /* Commit the new RSP value. */
5968 if (rc == VINF_SUCCESS)
5969 *pTmpRsp = NewRsp;
5970 }
5971
5972 return rc;
5973}
5974
5975
5976/**
5977 * Pops a qword from the stack, using a temporary stack pointer.
5978 *
5979 * @returns Strict VBox status code.
5980 * @param pIemCpu The IEM per CPU data.
5981 * @param pu64Value Where to store the popped value.
5982 * @param pTmpRsp Pointer to the temporary stack pointer.
5983 */
5984static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
5985{
5986 /* Increment the stack pointer. */
5987 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5988 RTUINT64U NewRsp = *pTmpRsp;
5989 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
5990
5991    /* Read the word the lazy way. */
5992 uint64_t const *pu64Src;
5993 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5994 if (rcStrict == VINF_SUCCESS)
5995 {
5996 *pu64Value = *pu64Src;
5997 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5998
5999 /* Commit the new RSP value. */
6000 if (rcStrict == VINF_SUCCESS)
6001 *pTmpRsp = NewRsp;
6002 }
6003
6004 return rcStrict;
6005}
6006
6007
6008/**
6009 * Begin a special stack push (used by interrupts, exceptions and such).
6010 *
6011 * This will raise \#SS or \#PF if appropriate.
6012 *
6013 * @returns Strict VBox status code.
6014 * @param pIemCpu The IEM per CPU data.
6015 * @param cbMem The number of bytes to push onto the stack.
6016 * @param ppvMem Where to return the pointer to the stack memory.
6017 * As with the other memory functions this could be
6018 * direct access or bounce buffered access, so
6019 * don't commit any registers until the commit call
6020 * succeeds.
6021 * @param puNewRsp Where to return the new RSP value. This must be
6022 * passed unchanged to
6023 * iemMemStackPushCommitSpecial().
6024 */
6025static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
6026{
6027 Assert(cbMem < UINT8_MAX);
6028 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6029 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
6030 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6031}
6032
6033
6034/**
6035 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
6036 *
6037 * This will update the rSP.
6038 *
6039 * @returns Strict VBox status code.
6040 * @param pIemCpu The IEM per CPU data.
6041 * @param pvMem The pointer returned by
6042 * iemMemStackPushBeginSpecial().
6043 * @param uNewRsp The new RSP value returned by
6044 * iemMemStackPushBeginSpecial().
6045 */
6046static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
6047{
6048 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
6049 if (rcStrict == VINF_SUCCESS)
6050 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6051 return rcStrict;
6052}
6053
6054
6055/**
6056 * Begin a special stack pop (used by iret, retf and such).
6057 *
6058 * This will raise \#SS or \#PF if appropriate.
6059 *
6060 * @returns Strict VBox status code.
6061 * @param pIemCpu The IEM per CPU data.
6062 * @param cbMem The number of bytes to pop from the stack.
6063 * @param ppvMem Where to return the pointer to the stack memory.
6064 * @param puNewRsp Where to return the new RSP value. This must be
6065 * passed unchanged to
6066 * iemMemStackPopCommitSpecial() or applied
6067 * manually if iemMemStackPopDoneSpecial() is used.
6068 */
6069static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6070{
6071 Assert(cbMem < UINT8_MAX);
6072 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6073 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
6074 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6075}
6076
6077
6078/**
6079 * Continue a special stack pop (used by iret and retf).
6080 *
6081 * This will raise \#SS or \#PF if appropriate.
6082 *
6083 * @returns Strict VBox status code.
6084 * @param pIemCpu The IEM per CPU data.
6085 * @param cbMem The number of bytes to pop from the stack.
6086 * @param ppvMem Where to return the pointer to the stack memory.
6087 * @param puNewRsp Where to return the new RSP value. This must be
6088 * passed unchanged to
6089 * iemMemStackPopCommitSpecial() or applied
6090 * manually if iemMemStackPopDoneSpecial() is used.
6091 */
6092static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6093{
6094 Assert(cbMem < UINT8_MAX);
6095 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6096 RTUINT64U NewRsp;
6097 NewRsp.u = *puNewRsp;
6098 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
6099 *puNewRsp = NewRsp.u;
6100 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6101}
6102
6103
6104/**
6105 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
6106 *
6107 * This will update the rSP.
6108 *
6109 * @returns Strict VBox status code.
6110 * @param pIemCpu The IEM per CPU data.
6111 * @param pvMem The pointer returned by
6112 * iemMemStackPopBeginSpecial().
6113 * @param uNewRsp The new RSP value returned by
6114 * iemMemStackPopBeginSpecial().
6115 */
6116static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
6117{
6118 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6119 if (rcStrict == VINF_SUCCESS)
6120 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6121 return rcStrict;
6122}
6123
6124
6125/**
6126 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
6127 * iemMemStackPopContinueSpecial).
6128 *
6129 * The caller will manually commit the rSP.
6130 *
6131 * @returns Strict VBox status code.
6132 * @param pIemCpu The IEM per CPU data.
6133 * @param pvMem The pointer returned by
6134 * iemMemStackPopBeginSpecial() or
6135 * iemMemStackPopContinueSpecial().
6136 */
6137static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
6138{
6139 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6140}
6141
6142
6143/**
6144 * Fetches a system table dword.
6145 *
6146 * @returns Strict VBox status code.
6147 * @param pIemCpu The IEM per CPU data.
6148 * @param pu32Dst Where to return the dword.
6149 * @param iSegReg The index of the segment register to use for
6150 * this access. The base and limits are checked.
6151 * @param GCPtrMem The address of the guest memory.
6152 */
6153static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6154{
6155 /* The lazy approach for now... */
6156 uint32_t const *pu32Src;
6157 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6158 if (rc == VINF_SUCCESS)
6159 {
6160 *pu32Dst = *pu32Src;
6161 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
6162 }
6163 return rc;
6164}
6165
6166
6167/**
6168 * Fetches a system table qword.
6169 *
6170 * @returns Strict VBox status code.
6171 * @param pIemCpu The IEM per CPU data.
6172 * @param pu64Dst Where to return the qword.
6173 * @param iSegReg The index of the segment register to use for
6174 * this access. The base and limits are checked.
6175 * @param GCPtrMem The address of the guest memory.
6176 */
6177static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6178{
6179 /* The lazy approach for now... */
6180 uint64_t const *pu64Src;
6181 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6182 if (rc == VINF_SUCCESS)
6183 {
6184 *pu64Dst = *pu64Src;
6185 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
6186 }
6187 return rc;
6188}
6189
6190
6191/**
6192 * Fetches a descriptor table entry.
6193 *
6194 * @returns Strict VBox status code.
6195 * @param pIemCpu The IEM per CPU.
6196 * @param pDesc Where to return the descriptor table entry.
6197 * @param uSel The selector which table entry to fetch.
6198 */
6199static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
6200{
6201 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6202
6203 /** @todo did the 286 require all 8 bytes to be accessible? */
6204 /*
6205 * Get the selector table base and check bounds.
6206 */
6207 RTGCPTR GCPtrBase;
6208 if (uSel & X86_SEL_LDT)
6209 {
6210 if ( !pCtx->ldtr.Attr.n.u1Present
6211 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
6212 {
6213 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
6214 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
6215 /** @todo is this the right exception? */
6216 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6217 }
6218
6219 Assert(pCtx->ldtr.Attr.n.u1Present);
6220 GCPtrBase = pCtx->ldtr.u64Base;
6221 }
6222 else
6223 {
6224 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
6225 {
6226 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
6227 /** @todo is this the right exception? */
6228 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6229 }
6230 GCPtrBase = pCtx->gdtr.pGdt;
6231 }
6232
6233 /*
6234 * Read the legacy descriptor and maybe the long mode extensions if
6235 * required.
6236 */
6237 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
6238 if (rcStrict == VINF_SUCCESS)
6239 {
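        /* In long mode, system descriptors (LDT, TSS and gates) are 16 bytes, so
           the high qword must be fetched as well.  Note that (uSel | X86_SEL_RPL_LDT) + 1
           equals (uSel & X86_SEL_MASK) + 8, i.e. the offset of that second qword. */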
6240 if ( !IEM_IS_LONG_MODE(pIemCpu)
6241 || pDesc->Legacy.Gen.u1DescType)
6242 pDesc->Long.au64[1] = 0;
6243 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
6244 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
6245 else
6246 {
6247 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
6248 /** @todo is this the right exception? */
6249 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6250 }
6251 }
6252 return rcStrict;
6253}
6254
6255
6256/**
6257 * Fakes a long mode stack selector for SS = 0.
6258 *
6259 * @param pDescSs Where to return the fake stack descriptor.
6260 * @param uDpl The DPL we want.
6261 */
6262static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
6263{
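    /* A NULL SS is legal in long mode when running at CPL 0 thru 2; synthesize a
       present, accessed read/write data descriptor with the long bit set so the
       rest of the code can treat it like any other stack segment. */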
6264 pDescSs->Long.au64[0] = 0;
6265 pDescSs->Long.au64[1] = 0;
6266 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
6267 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
6268 pDescSs->Long.Gen.u2Dpl = uDpl;
6269 pDescSs->Long.Gen.u1Present = 1;
6270 pDescSs->Long.Gen.u1Long = 1;
6271}
6272
6273
6274/**
6275 * Marks the selector descriptor as accessed (only non-system descriptors).
6276 *
6277 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
6278 * will therefore skip the limit checks.
6279 *
6280 * @returns Strict VBox status code.
6281 * @param pIemCpu The IEM per CPU.
6282 * @param uSel The selector.
6283 */
6284static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
6285{
6286 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6287
6288 /*
6289 * Get the selector table base and calculate the entry address.
6290 */
6291 RTGCPTR GCPtr = uSel & X86_SEL_LDT
6292 ? pCtx->ldtr.u64Base
6293 : pCtx->gdtr.pGdt;
6294 GCPtr += uSel & X86_SEL_MASK;
6295
6296 /*
6297 * ASMAtomicBitSet will assert if the address is misaligned, so do some
6298 * ugly stuff to avoid this. This will make sure it's an atomic access
6299 * as well as more or less remove any question about 8-bit or 32-bit accesses.
6300 */
6301 VBOXSTRICTRC rcStrict;
6302 uint32_t volatile *pu32;
6303 if ((GCPtr & 3) == 0)
6304 {
6305        /* The normal case, map the dword containing the accessed bit (bit 40). */
6306 GCPtr += 2 + 2;
6307 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6308 if (rcStrict != VINF_SUCCESS)
6309 return rcStrict;
6310        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
6311 }
6312 else
6313 {
6314 /* The misaligned GDT/LDT case, map the whole thing. */
6315 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6316 if (rcStrict != VINF_SUCCESS)
6317 return rcStrict;
6318 switch ((uintptr_t)pu32 & 3)
6319 {
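            /* The accessed bit is bit 40 from the start of the descriptor.  Step the
               byte pointer forward until it is dword aligned again and shrink the bit
               index by the same amount, so the atomic RMW never touches a misaligned
               address. */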
6320 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
6321 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
6322 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
6323 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
6324 }
6325 }
6326
6327 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
6328}
6329
6330/** @} */
6331
6332
6333/*
6334 * Include the C/C++ implementation of instruction.
6335 */
6336#include "IEMAllCImpl.cpp.h"
6337
6338
6339
6340/** @name "Microcode" macros.
6341 *
6342 * The idea is that we should be able to use the same code to interpret
6343 * instructions as well as recompiler instructions. Thus this obfuscation.
6344 *
6345 * @{
6346 */
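/*
 * Purely illustrative sketch (not taken from the real decoder tables): an
 * instruction body written against these macros typically reads something like
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);   // iGRegSrc/iGRegDst are hypothetical
 *      IEM_MC_STORE_GREG_U16(iGRegDst, u16Value);   // indices decoded from ModR/M earlier.
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * and expands to plain C for the interpreter (and, maybe, to recompiler input
 * later on).
 */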
6347#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
6348#define IEM_MC_END() }
6349#define IEM_MC_PAUSE() do {} while (0)
6350#define IEM_MC_CONTINUE() do {} while (0)
6351
6352/** Internal macro. */
6353#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
6354 do \
6355 { \
6356 VBOXSTRICTRC rcStrict2 = a_Expr; \
6357 if (rcStrict2 != VINF_SUCCESS) \
6358 return rcStrict2; \
6359 } while (0)
6360
6361#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
6362#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
6363#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
6364#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
6365#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
6366#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
6367#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
6368
6369#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
6370#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
6371 do { \
6372 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
6373 return iemRaiseDeviceNotAvailable(pIemCpu); \
6374 } while (0)
6375#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
6376 do { \
6377 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
6378 return iemRaiseMathFault(pIemCpu); \
6379 } while (0)
6380#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
6381 do { \
6382 if (pIemCpu->uCpl != 0) \
6383 return iemRaiseGeneralProtectionFault0(pIemCpu); \
6384 } while (0)
6385
6386
6387#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
6388#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
6389#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
6390#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
6391#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
6392#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
6393#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
6394 uint32_t a_Name; \
6395 uint32_t *a_pName = &a_Name
6396#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
6397 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
6398
6399#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
6400#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
6401
6402#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6403#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6404#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6405#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6406#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6407#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6408#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6409#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6410#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6411#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6412#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6413#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6414#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6415#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6416#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
6417#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
6418#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
6419#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6420#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6421#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6422#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6423#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6424#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
6425#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6426#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6427#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6428#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6429#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6430#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6431/** @note Not for IOPL or IF testing or modification. */
6432#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6433#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6434#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
6435#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
6436
6437#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
6438#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
6439#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
6440#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
6441#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
6442#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
6443#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
6444#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
6445#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
6446#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
6447#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
6448 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
6449
6450#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
6451#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
6452/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
6453 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
6454#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
6455#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
6456/** @note Not for IOPL or IF testing or modification. */
6457#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6458
6459#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
6460#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
6461#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
6462 do { \
6463 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6464 *pu32Reg += (a_u32Value); \
6465        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6466 } while (0)
6467#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
6468
6469#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
6470#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
6471#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
6472 do { \
6473 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6474 *pu32Reg -= (a_u32Value); \
6475        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6476 } while (0)
6477#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
6478
6479#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
6480#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
6481#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
6482#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
6483#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
6484#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
6485#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
6486
6487#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
6488#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
6489#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6490#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
6491
6492#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
6493#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
6494#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
6495
6496#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
6497#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6498
6499#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
6500#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
6501#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
6502
6503#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
6504#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
6505#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
6506
6507#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6508
6509#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6510
6511#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
6512#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
6513#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
6514 do { \
6515 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6516 *pu32Reg &= (a_u32Value); \
6517        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6518 } while (0)
6519#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
6520
6521#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
6522#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
6523#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
6524 do { \
6525 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6526 *pu32Reg |= (a_u32Value); \
6527        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6528 } while (0)
6529#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
6530
6531
6532/** @note Not for IOPL or IF modification. */
6533#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
6534/** @note Not for IOPL or IF modification. */
6535#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
6536/** @note Not for IOPL or IF modification. */
6537#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
6538
6539#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
6540
6541
6542#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
6543 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
6544#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
6545 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
6546#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
6547 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
6548
6549#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6550 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
6551#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6552 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6553#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
6554 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
6555
6556#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6557 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
6558#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6559 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6560#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
6561 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
6562
6563#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6564 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6565
6566#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6567 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6568#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6569 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6570
6571#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
6572 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
6573#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
6574 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
6575#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
6576 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
6577
6578
6579#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6580 do { \
6581 uint8_t u8Tmp; \
6582 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6583 (a_u16Dst) = u8Tmp; \
6584 } while (0)
6585#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6586 do { \
6587 uint8_t u8Tmp; \
6588 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6589 (a_u32Dst) = u8Tmp; \
6590 } while (0)
6591#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6592 do { \
6593 uint8_t u8Tmp; \
6594 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6595 (a_u64Dst) = u8Tmp; \
6596 } while (0)
6597#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6598 do { \
6599 uint16_t u16Tmp; \
6600 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6601 (a_u32Dst) = u16Tmp; \
6602 } while (0)
6603#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6604 do { \
6605 uint16_t u16Tmp; \
6606 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6607 (a_u64Dst) = u16Tmp; \
6608 } while (0)
6609#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6610 do { \
6611 uint32_t u32Tmp; \
6612 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6613 (a_u64Dst) = u32Tmp; \
6614 } while (0)
6615
6616#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6617 do { \
6618 uint8_t u8Tmp; \
6619 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6620 (a_u16Dst) = (int8_t)u8Tmp; \
6621 } while (0)
6622#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6623 do { \
6624 uint8_t u8Tmp; \
6625 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6626 (a_u32Dst) = (int8_t)u8Tmp; \
6627 } while (0)
6628#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6629 do { \
6630 uint8_t u8Tmp; \
6631 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6632 (a_u64Dst) = (int8_t)u8Tmp; \
6633 } while (0)
6634#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6635 do { \
6636 uint16_t u16Tmp; \
6637 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6638 (a_u32Dst) = (int16_t)u16Tmp; \
6639 } while (0)
6640#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6641 do { \
6642 uint16_t u16Tmp; \
6643 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6644 (a_u64Dst) = (int16_t)u16Tmp; \
6645 } while (0)
6646#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6647 do { \
6648 uint32_t u32Tmp; \
6649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6650 (a_u64Dst) = (int32_t)u32Tmp; \
6651 } while (0)
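/* Illustrative sketch (not part of the original source): a hypothetical opcode
 * body combining IEM_MC_CALC_RM_EFF_ADDR (defined further down) with one of the
 * sign-extending fetch macros above, roughly the memory form of movsx Gd,Eb.
 * IEM_MC_BEGIN/END, IEM_MC_LOCAL, IEM_MC_STORE_GREG_U32 and IEM_MC_ADVANCE_RIP
 * are assumed to be the MC macros defined elsewhere in the IEM sources.
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *     IEM_MC_LOCAL(uint32_t, u32Value);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
 *     IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffSrc);
 *     IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */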
6652
6653#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
6654 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
6655#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
6656 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
6657#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
6658 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
6659#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
6660 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
6661
6662#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
6663 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
6664#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
6665 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
6666#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
6667 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
6668#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
6669 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
6670
6671#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
6672#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
6673#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
6674#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
6675#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
6676#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
6677#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
6678 do { \
6679 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
6680 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
6681 } while (0)
6682
6683
6684#define IEM_MC_PUSH_U16(a_u16Value) \
6685 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
6686#define IEM_MC_PUSH_U32(a_u32Value) \
6687 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
6688#define IEM_MC_PUSH_U64(a_u64Value) \
6689 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
6690
6691#define IEM_MC_POP_U16(a_pu16Value) \
6692 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
6693#define IEM_MC_POP_U32(a_pu32Value) \
6694 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
6695#define IEM_MC_POP_U64(a_pu64Value) \
6696 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
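/* Illustrative sketch (not part of the original source): the push/pop macros
 * above are the only way an MC block is meant to touch SS:[RSP]. A hypothetical
 * 16-bit register push would look roughly like this; IEM_MC_BEGIN/END,
 * IEM_MC_LOCAL, IEM_MC_FETCH_GREG_U16, IEM_MC_ADVANCE_RIP and the iReg
 * parameter are assumed from elsewhere in the IEM sources.
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, iReg);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */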
6697
6698/** Maps guest memory for direct or bounce buffered access.
6699 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6700 * @remarks May return.
6701 */
6702#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
6703 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6704
6705/** Maps guest memory for direct or bounce buffered access.
6706 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6707 * @remarks May return.
6708 */
6709#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
6710 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6711
6712/** Commits the memory and unmaps the guest memory.
6713 * @remarks May return.
6714 */
6715#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
6716 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
6717
6718/** Commits the memory and unmaps the guest memory unless the FPU status word
6719 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
6720 * that would prevent the store from taking place.
6721 *
6722 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
6723 * store, while \#P will not.
6724 *
6725 * @remarks May in theory return - for now.
6726 */
6727#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
6728 do { \
6729 if ( !(a_u16FSW & X86_FSW_ES) \
6730 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
6731 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
6732 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
6733 } while (0)
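/* Worked example (illustrative only) of the check above. The FSW exception
 * flags IE/OE/UE sit in the same bit positions as the FCW mask bits IM/OM/UM,
 * which is why the raw AND against the inverted mask works:
 *  - Masked #P after the store: ES is clear in a_u16FSW, so the first operand
 *    of the || is true and the store is committed.
 *  - Unmasked #IA: ES and IE are set in a_u16FSW and IM is clear in FCW, so
 *    (FSW & IE) & ~mask is non-zero, both operands are false and the mapping
 *    is left uncommitted - matching the #O/#U/#IA/#IS vs. #P rule stated above.
 */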
6734
6735/** Calculates the effective address from the ModR/M byte. */
6736#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
6737 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
6738
6739#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
6740#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
6741#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
6742#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
6743#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
6744
6745/**
6746 * Defers the rest of the instruction emulation to a C implementation routine
6747 * and returns, only taking the standard parameters.
6748 *
6749 * @param a_pfnCImpl The pointer to the C routine.
6750 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6751 */
6752#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6753
6754/**
6755 * Defers the rest of instruction emulation to a C implementation routine and
6756 * returns, taking one argument in addition to the standard ones.
6757 *
6758 * @param a_pfnCImpl The pointer to the C routine.
6759 * @param a0 The argument.
6760 */
6761#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6762
6763/**
6764 * Defers the rest of the instruction emulation to a C implementation routine
6765 * and returns, taking two arguments in addition to the standard ones.
6766 *
6767 * @param a_pfnCImpl The pointer to the C routine.
6768 * @param a0 The first extra argument.
6769 * @param a1 The second extra argument.
6770 */
6771#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6772
6773/**
6774 * Defers the rest of the instruction emulation to a C implementation routine
6775 * and returns, taking three arguments in addition to the standard ones.
6776 *
6777 * @param a_pfnCImpl The pointer to the C routine.
6778 * @param a0 The first extra argument.
6779 * @param a1 The second extra argument.
6780 * @param a2 The third extra argument.
6781 */
6782#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
6783
6784/**
6785 * Defers the rest of the instruction emulation to a C implementation routine
6786 * and returns, taking five arguments in addition to the standard ones.
6787 *
6788 * @param a_pfnCImpl The pointer to the C routine.
6789 * @param a0 The first extra argument.
6790 * @param a1 The second extra argument.
6791 * @param a2 The third extra argument.
6792 * @param a3 The fourth extra argument.
6793 * @param a4 The fifth extra argument.
6794 */
6795#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
6796
6797/**
6798 * Defers the entire instruction emulation to a C implementation routine and
6799 * returns, only taking the standard parameters.
6800 *
6801 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6802 *
6803 * @param a_pfnCImpl The pointer to the C routine.
6804 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6805 */
6806#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6807
6808/**
6809 * Defers the entire instruction emulation to a C implementation routine and
6810 * returns, taking one argument in addition to the standard ones.
6811 *
6812 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6813 *
6814 * @param a_pfnCImpl The pointer to the C routine.
6815 * @param a0 The argument.
6816 */
6817#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6818
6819/**
6820 * Defers the entire instruction emulation to a C implementation routine and
6821 * returns, taking two arguments in addition to the standard ones.
6822 *
6823 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6824 *
6825 * @param a_pfnCImpl The pointer to the C routine.
6826 * @param a0 The first extra argument.
6827 * @param a1 The second extra argument.
6828 */
6829#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6830
6831/**
6832 * Defers the entire instruction emulation to a C implementation routine and
6833 * returns, taking three arguments in addition to the standard ones.
6834 *
6835 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6836 *
6837 * @param a_pfnCImpl The pointer to the C routine.
6838 * @param a0 The first extra argument.
6839 * @param a1 The second extra argument.
6840 * @param a2 The third extra argument.
6841 */
6842#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
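/* Illustrative sketch (not part of the original source): the DEFER_TO_CIMPL
 * family is used straight from a decoder function, without an
 * IEM_MC_BEGIN/IEM_MC_END block around it. A hlt-style instruction would look
 * roughly like this; FNIEMOP_DEF and the iemCImpl_hlt routine are assumed to
 * be declared elsewhere in the IEM sources.
 *
 *     FNIEMOP_DEF(iemOp_hlt)
 *     {
 *         IEMOP_HLP_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *     }
 */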
6843
6844/**
6845 * Calls a FPU assembly implementation taking one visible argument.
6846 *
6847 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6848 * @param a0 The first extra argument.
6849 */
6850#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
6851 do { \
6852 iemFpuPrepareUsage(pIemCpu); \
6853 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
6854 } while (0)
6855
6856/**
6857 * Calls a FPU assembly implementation taking two visible arguments.
6858 *
6859 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6860 * @param a0 The first extra argument.
6861 * @param a1 The second extra argument.
6862 */
6863#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
6864 do { \
6865 iemFpuPrepareUsage(pIemCpu); \
6866 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
6867 } while (0)
6868
6869/**
6870 * Calls a FPU assembly implementation taking three visible arguments.
6871 *
6872 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6873 * @param a0 The first extra argument.
6874 * @param a1 The second extra argument.
6875 * @param a2 The third extra argument.
6876 */
6877#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
6878 do { \
6879 iemFpuPrepareUsage(pIemCpu); \
6880 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
6881 } while (0)
6882
6883#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
6884 do { \
6885 (a_FpuData).FSW = (a_FSW); \
6886 (a_FpuData).r80Result = *(a_pr80Value); \
6887 } while (0)
6888
6889/** Pushes FPU result onto the stack. */
6890#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
6891 iemFpuPushResult(pIemCpu, &a_FpuData)
6892/** Pushes FPU result onto the stack and sets the FPUDP. */
6893#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
6894 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
6895
6896/** Replaces ST0 with the first result value and pushes the second onto the FPU stack. */
6897#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
6898 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
6899
6900/** Stores FPU result in a stack register. */
6901#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
6902 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
6903/** Stores FPU result in a stack register and pops the stack. */
6904#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
6905 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
6906/** Stores FPU result in a stack register and sets the FPUDP. */
6907#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6908 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6909/** Stores FPU result in a stack register, sets the FPUDP, and pops the
6910 * stack. */
6911#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6912 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6913
6914/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
6915#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
6916 iemFpuUpdateOpcodeAndIp(pIemCpu)
6917/** Free a stack register (for FFREE and FFREEP). */
6918#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
6919 iemFpuStackFree(pIemCpu, a_iStReg)
6920/** Increment the FPU stack pointer. */
6921#define IEM_MC_FPU_STACK_INC_TOP() \
6922 iemFpuStackIncTop(pIemCpu)
6923/** Decrement the FPU stack pointer. */
6924#define IEM_MC_FPU_STACK_DEC_TOP() \
6925 iemFpuStackDecTop(pIemCpu)
6926
6927/** Updates the FSW, FOP, FPUIP, and FPUCS. */
6928#define IEM_MC_UPDATE_FSW(a_u16FSW) \
6929 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6930/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
6931#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
6932 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6933/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
6934#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6935 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6936/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
6937#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
6938 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
6939/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
6940 * stack. */
6941#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6942 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6943/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
6944#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
6945 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
6946
6947/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
6948#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
6949 iemFpuStackUnderflow(pIemCpu, a_iStDst)
6950/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6951 * stack. */
6952#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
6953 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
6954/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6955 * FPUDS. */
6956#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6957 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6958/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6959 * FPUDS. Pops stack. */
6960#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6961 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6962/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6963 * stack twice. */
6964#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
6965 iemFpuStackUnderflowThenPopPop(pIemCpu)
6966/** Raises a FPU stack underflow exception for an instruction pushing a result
6967 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
6968#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
6969 iemFpuStackPushUnderflow(pIemCpu)
6970/** Raises a FPU stack underflow exception for an instruction pushing a result
6971 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
6972#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
6973 iemFpuStackPushUnderflowTwo(pIemCpu)
6974
6975/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6976 * FPUIP, FPUCS and FOP. */
6977#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
6978 iemFpuStackPushOverflow(pIemCpu)
6979/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6980 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
6981#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
6982 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
6983/** Indicates that we (might) have modified the FPU state. */
6984#define IEM_MC_USED_FPU() \
6985 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
6986
6987/** @note Not for IOPL or IF testing. */
6988#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
6989/** @note Not for IOPL or IF testing. */
6990#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
6991/** @note Not for IOPL or IF testing. */
6992#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
6993/** @note Not for IOPL or IF testing. */
6994#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
6995/** @note Not for IOPL or IF testing. */
6996#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
6997 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6998 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6999/** @note Not for IOPL or IF testing. */
7000#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
7001 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7002 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7003/** @note Not for IOPL or IF testing. */
7004#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
7005 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7006 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7007 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7008/** @note Not for IOPL or IF testing. */
7009#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
7010 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7011 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7012 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7013#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
7014#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
7015#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
7016/** @note Not for IOPL or IF testing. */
7017#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7018 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7019 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7020/** @note Not for IOPL or IF testing. */
7021#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7022 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7023 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7024/** @note Not for IOPL or IF testing. */
7025#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7026 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7027 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7028/** @note Not for IOPL or IF testing. */
7029#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7030 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7031 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7032/** @note Not for IOPL or IF testing. */
7033#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7034 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7035 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7036/** @note Not for IOPL or IF testing. */
7037#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7038 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7039 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7040#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
7041#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
7042#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
7043 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
7044#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
7045 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
7046#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
7047 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
7048#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
7049 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
7050#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
7051 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
7052#define IEM_MC_IF_FCW_IM() \
7053 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
7054
7055#define IEM_MC_ELSE() } else {
7056#define IEM_MC_ENDIF() } do {} while (0)
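/* Illustrative sketch (not part of the original source): how the conditional
 * macros above combine with the FPU call/store macros in a typical two-register
 * FPU operation, roughly the shape of fadd st0,stN. The names not defined in
 * this section - IEM_MC_BEGIN/END, IEM_MC_LOCAL, IEM_MC_ARG, IEM_MC_ARG_LOCAL_REF,
 * IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE, IEM_MC_MAYBE_RAISE_FPU_XCPT,
 * IEM_MC_ADVANCE_RIP and iemAImpl_fadd_r80_by_r80 - are assumed from the rest
 * of the IEM sources.
 *
 *     IEM_MC_BEGIN(3, 1);
 *     IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *     IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *     IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *     IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *     IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *     IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *     IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
 *         IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     IEM_MC_ENDIF();
 *     IEM_MC_USED_FPU();
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */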
7057
7058/** @} */
7059
7060
7061/** @name Opcode Debug Helpers.
7062 * @{
7063 */
7064#ifdef DEBUG
7065# define IEMOP_MNEMONIC(a_szMnemonic) \
7066 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7067 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
7068# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
7069 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7070 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
7071#else
7072# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
7073# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
7074#endif
7075
7076/** @} */
7077
7078
7079/** @name Opcode Helpers.
7080 * @{
7081 */
7082
7083/** The instruction raises an \#UD in real and V8086 mode. */
7084#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
7085 do \
7086 { \
7087 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
7088 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7089 } while (0)
7090
7091/** The instruction does not allow lock prefixing (in this encoding); raises \#UD
7092 * if lock prefixed.
7093 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
7094#define IEMOP_HLP_NO_LOCK_PREFIX() \
7095 do \
7096 { \
7097 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7098 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7099 } while (0)
7100
7101/** The instruction is not available in 64-bit mode; raises \#UD if executed in
7102 * 64-bit mode. */
7103#define IEMOP_HLP_NO_64BIT() \
7104 do \
7105 { \
7106 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7107 return IEMOP_RAISE_INVALID_OPCODE(); \
7108 } while (0)
7109
7110/** The instruction defaults to 64-bit operand size in 64-bit mode. */
7111#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
7112 do \
7113 { \
7114 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7115 iemRecalEffOpSize64Default(pIemCpu); \
7116 } while (0)
7117
7118/** The instruction has 64-bit operand size in 64-bit mode. */
7119#define IEMOP_HLP_64BIT_OP_SIZE() \
7120 do \
7121 { \
7122 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7123 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
7124 } while (0)
7125
7126/**
7127 * Done decoding.
7128 */
7129#define IEMOP_HLP_DONE_DECODING() \
7130 do \
7131 { \
7132 /*nothing for now, maybe later... */ \
7133 } while (0)
7134
7135/**
7136 * Done decoding, raise \#UD exception if lock prefix present.
7137 */
7138#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
7139 do \
7140 { \
7141 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7142 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7143 } while (0)
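/* Illustrative sketch (not part of the original source): a decoder function
 * typically applies the helpers above right after reading its opcode bytes and
 * before opening an IEM_MC block or deferring to a CImpl routine. FNIEMOP_DEF
 * is assumed from elsewhere in the IEM sources, and iemCImpl_aas is a
 * hypothetical routine name used only for illustration.
 *
 *     FNIEMOP_DEF(iemOp_aas)
 *     {
 *         IEMOP_MNEMONIC("aas");
 *         IEMOP_HLP_NO_64BIT();                  // not valid in long mode -> #UD
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_aas);
 *     }
 */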
7144
7145
7146/**
7147 * Calculates the effective address of a ModR/M memory operand.
7148 *
7149 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7150 *
7151 * @return Strict VBox status code.
7152 * @param pIemCpu The IEM per CPU data.
7153 * @param bRm The ModRM byte.
7154 * @param pGCPtrEff Where to return the effective address.
7155 */
7156static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
7157{
7158 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
7159 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7160#define SET_SS_DEF() \
7161 do \
7162 { \
7163 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
7164 pIemCpu->iEffSeg = X86_SREG_SS; \
7165 } while (0)
7166
7167/** @todo Check the effective address size crap! */
7168 switch (pIemCpu->enmEffAddrMode)
7169 {
7170 case IEMMODE_16BIT:
7171 {
7172 uint16_t u16EffAddr;
7173
7174 /* Handle the disp16 form with no registers first. */
7175 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
7176 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
7177 else
7178 {
7179 /* Get the displacement. */
7180 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7181 {
7182 case 0: u16EffAddr = 0; break;
7183 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
7184 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
7185 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7186 }
7187
7188 /* Add the base and index registers to the disp. */
7189 switch (bRm & X86_MODRM_RM_MASK)
7190 {
7191 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
7192 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
7193 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
7194 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
7195 case 4: u16EffAddr += pCtx->si; break;
7196 case 5: u16EffAddr += pCtx->di; break;
7197 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
7198 case 7: u16EffAddr += pCtx->bx; break;
7199 }
7200 }
7201
7202 *pGCPtrEff = u16EffAddr;
7203 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
7204 return VINF_SUCCESS;
7205 }
7206
7207 case IEMMODE_32BIT:
7208 {
7209 uint32_t u32EffAddr;
7210
7211 /* Handle the disp32 form with no registers first. */
7212 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7213 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
7214 else
7215 {
7216 /* Get the register (or SIB) value. */
7217 switch ((bRm & X86_MODRM_RM_MASK))
7218 {
7219 case 0: u32EffAddr = pCtx->eax; break;
7220 case 1: u32EffAddr = pCtx->ecx; break;
7221 case 2: u32EffAddr = pCtx->edx; break;
7222 case 3: u32EffAddr = pCtx->ebx; break;
7223 case 4: /* SIB */
7224 {
7225 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7226
7227 /* Get the index and scale it. */
7228 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
7229 {
7230 case 0: u32EffAddr = pCtx->eax; break;
7231 case 1: u32EffAddr = pCtx->ecx; break;
7232 case 2: u32EffAddr = pCtx->edx; break;
7233 case 3: u32EffAddr = pCtx->ebx; break;
7234 case 4: u32EffAddr = 0; /*none */ break;
7235 case 5: u32EffAddr = pCtx->ebp; break;
7236 case 6: u32EffAddr = pCtx->esi; break;
7237 case 7: u32EffAddr = pCtx->edi; break;
7238 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7239 }
7240 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7241
7242 /* add base */
7243 switch (bSib & X86_SIB_BASE_MASK)
7244 {
7245 case 0: u32EffAddr += pCtx->eax; break;
7246 case 1: u32EffAddr += pCtx->ecx; break;
7247 case 2: u32EffAddr += pCtx->edx; break;
7248 case 3: u32EffAddr += pCtx->ebx; break;
7249 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
7250 case 5:
7251 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7252 {
7253 u32EffAddr += pCtx->ebp;
7254 SET_SS_DEF();
7255 }
7256 else
7257 {
7258 uint32_t u32Disp;
7259 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7260 u32EffAddr += u32Disp;
7261 }
7262 break;
7263 case 6: u32EffAddr += pCtx->esi; break;
7264 case 7: u32EffAddr += pCtx->edi; break;
7265 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7266 }
7267 break;
7268 }
7269 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
7270 case 6: u32EffAddr = pCtx->esi; break;
7271 case 7: u32EffAddr = pCtx->edi; break;
7272 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7273 }
7274
7275 /* Get and add the displacement. */
7276 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7277 {
7278 case 0:
7279 break;
7280 case 1:
7281 {
7282 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7283 u32EffAddr += i8Disp;
7284 break;
7285 }
7286 case 2:
7287 {
7288 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7289 u32EffAddr += u32Disp;
7290 break;
7291 }
7292 default:
7293 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7294 }
7295
7296 }
7297 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
7298 *pGCPtrEff = u32EffAddr;
7299 else
7300 {
7301 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
7302 *pGCPtrEff = u32EffAddr & UINT16_MAX;
7303 }
7304 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
7305 return VINF_SUCCESS;
7306 }
7307
7308 case IEMMODE_64BIT:
7309 {
7310 uint64_t u64EffAddr;
7311
7312 /* Handle the rip+disp32 form with no registers first. */
7313 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7314 {
7315 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
7316 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
7317 }
7318 else
7319 {
7320 /* Get the register (or SIB) value. */
7321 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
7322 {
7323 case 0: u64EffAddr = pCtx->rax; break;
7324 case 1: u64EffAddr = pCtx->rcx; break;
7325 case 2: u64EffAddr = pCtx->rdx; break;
7326 case 3: u64EffAddr = pCtx->rbx; break;
7327 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
7328 case 6: u64EffAddr = pCtx->rsi; break;
7329 case 7: u64EffAddr = pCtx->rdi; break;
7330 case 8: u64EffAddr = pCtx->r8; break;
7331 case 9: u64EffAddr = pCtx->r9; break;
7332 case 10: u64EffAddr = pCtx->r10; break;
7333 case 11: u64EffAddr = pCtx->r11; break;
7334 case 13: u64EffAddr = pCtx->r13; break;
7335 case 14: u64EffAddr = pCtx->r14; break;
7336 case 15: u64EffAddr = pCtx->r15; break;
7337 /* SIB */
7338 case 4:
7339 case 12:
7340 {
7341 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7342
7343 /* Get the index and scale it. */
7344 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
7345 {
7346 case 0: u64EffAddr = pCtx->rax; break;
7347 case 1: u64EffAddr = pCtx->rcx; break;
7348 case 2: u64EffAddr = pCtx->rdx; break;
7349 case 3: u64EffAddr = pCtx->rbx; break;
7350 case 4: u64EffAddr = 0; /*none */ break;
7351 case 5: u64EffAddr = pCtx->rbp; break;
7352 case 6: u64EffAddr = pCtx->rsi; break;
7353 case 7: u64EffAddr = pCtx->rdi; break;
7354 case 8: u64EffAddr = pCtx->r8; break;
7355 case 9: u64EffAddr = pCtx->r9; break;
7356 case 10: u64EffAddr = pCtx->r10; break;
7357 case 11: u64EffAddr = pCtx->r11; break;
7358 case 12: u64EffAddr = pCtx->r12; break;
7359 case 13: u64EffAddr = pCtx->r13; break;
7360 case 14: u64EffAddr = pCtx->r14; break;
7361 case 15: u64EffAddr = pCtx->r15; break;
7362 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7363 }
7364 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7365
7366 /* add base */
7367 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
7368 {
7369 case 0: u64EffAddr += pCtx->rax; break;
7370 case 1: u64EffAddr += pCtx->rcx; break;
7371 case 2: u64EffAddr += pCtx->rdx; break;
7372 case 3: u64EffAddr += pCtx->rbx; break;
7373 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
7374 case 6: u64EffAddr += pCtx->rsi; break;
7375 case 7: u64EffAddr += pCtx->rdi; break;
7376 case 8: u64EffAddr += pCtx->r8; break;
7377 case 9: u64EffAddr += pCtx->r9; break;
7378 case 10: u64EffAddr += pCtx->r10; break;
7379 case 11: u64EffAddr += pCtx->r11; break;
7380 case 14: u64EffAddr += pCtx->r14; break;
7381 case 15: u64EffAddr += pCtx->r15; break;
7382 /* complicated encodings */
7383 case 5:
7384 case 13:
7385 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7386 {
7387 if (!pIemCpu->uRexB)
7388 {
7389 u64EffAddr += pCtx->rbp;
7390 SET_SS_DEF();
7391 }
7392 else
7393 u64EffAddr += pCtx->r13;
7394 }
7395 else
7396 {
7397 uint32_t u32Disp;
7398 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7399 u64EffAddr += (int32_t)u32Disp;
7400 }
7401 break;
7402 }
7403 break;
7404 }
7405 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7406 }
7407
7408 /* Get and add the displacement. */
7409 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7410 {
7411 case 0:
7412 break;
7413 case 1:
7414 {
7415 int8_t i8Disp;
7416 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7417 u64EffAddr += i8Disp;
7418 break;
7419 }
7420 case 2:
7421 {
7422 uint32_t u32Disp;
7423 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7424 u64EffAddr += (int32_t)u32Disp;
7425 break;
7426 }
7427 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
7428 }
7429
7430 }
7431 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
7432 *pGCPtrEff = u64EffAddr;
7433 else
7434 *pGCPtrEff = u64EffAddr & UINT16_MAX;
7435 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
7436 return VINF_SUCCESS;
7437 }
7438 }
7439
7440 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
7441}
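/* Worked example (illustrative, values made up): in 16-bit addressing mode,
 * bRm = 0x46 decodes as mod=1, r/m=6, i.e. [bp+disp8]; the routine above reads
 * the sign-extended disp8 (say 0x10), adds BP (say 0x1234), makes SS the
 * default segment via SET_SS_DEF() and returns *pGCPtrEff = 0x1244. In 32-bit
 * mode, bRm = 0x04 followed by the SIB byte 0x88 decodes instead as
 * [eax + ecx*4] with no displacement.
 */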
7442
7443/** @} */
7444
7445
7446
7447/*
7448 * Include the instructions
7449 */
7450#include "IEMAllInstructions.cpp.h"
7451
7452
7453
7454
7455#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7456
7457/**
7458 * Sets up execution verification mode.
7459 */
7460static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
7461{
7462 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7463 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
7464
7465 /*
7466 * Always note down the address of the current instruction.
7467 */
7468 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
7469 pIemCpu->uOldRip = pOrgCtx->rip;
7470
7471 /*
7472 * Enable verification and/or logging.
7473 */
7474 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
7475 if ( pIemCpu->fNoRem
7476 && ( 0
7477#if 0 /* auto enable on first paged protected mode interrupt */
7478 || ( pOrgCtx->eflags.Bits.u1IF
7479 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
7480 && TRPMHasTrap(pVCpu)
7481 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
7482#endif
7483#if 0
7484 || ( pOrgCtx->cs.Sel == 0x10
7485 && ( pOrgCtx->rip == 0x90119e3e
7486 || pOrgCtx->rip == 0x901d9810) )
7487#endif
7488#if 0 /* Auto enable DSL - FPU stuff. */
7489 || ( pOrgCtx->cs.Sel == 0x10
7490 && (// pOrgCtx->rip == 0xc02ec07f
7491 //|| pOrgCtx->rip == 0xc02ec082
7492 //|| pOrgCtx->rip == 0xc02ec0c9
7493 0
7494 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
7495#endif
7496#if 0 /* Auto enable DSL - fstp st0 stuff. */
7497 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
7498#endif
7499#if 0
7500 || pOrgCtx->rip == 0x9022bb3a
7501#endif
7502#if 0
7503 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
7504#endif
7505#if 0
7506 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
7507 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
7508#endif
7509#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
7510 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
7511 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
7512 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
7513#endif
7514#if 0 /* NT4SP1 - xadd early boot. */
7515 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
7516#endif
7517#if 0 /* NT4SP1 - wrmsr (intel MSR). */
7518 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
7519#endif
7520#if 0 /* NT4SP1 - cmpxchg (AMD). */
7521 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
7522#endif
7523#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
7524 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
7525#endif
7526#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
7527 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
7528
7529#endif
7530#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
7531 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
7532
7533#endif
7534#if 0 /* NT4SP1 - frstor [ecx] */
7535 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
7536#endif
7537 )
7538 )
7539 {
7540 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
7541 RTLogFlags(NULL, "enabled");
7542 pIemCpu->fNoRem = false;
7543 }
7544
7545 /*
7546 * Switch state.
7547 */
7548 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7549 {
7550 static CPUMCTX s_DebugCtx; /* Ugly! */
7551
7552 s_DebugCtx = *pOrgCtx;
7553 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
7554 }
7555
7556 /*
7557 * See if there is an interrupt pending in TRPM and inject it if we can.
7558 */
7559 pIemCpu->uInjectCpl = UINT8_MAX;
7560 if ( pOrgCtx->eflags.Bits.u1IF
7561 && TRPMHasTrap(pVCpu)
7562 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
7563 {
7564 uint8_t u8TrapNo;
7565 TRPMEVENT enmType;
7566 RTGCUINT uErrCode;
7567 RTGCPTR uCr2;
7568 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
7569 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
7570 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7571 TRPMResetTrap(pVCpu);
7572 pIemCpu->uInjectCpl = pIemCpu->uCpl;
7573 }
7574
7575 /*
7576 * Reset the counters.
7577 */
7578 pIemCpu->cIOReads = 0;
7579 pIemCpu->cIOWrites = 0;
7580 pIemCpu->fIgnoreRaxRdx = false;
7581 pIemCpu->fOverlappingMovs = false;
7582 pIemCpu->fUndefinedEFlags = 0;
7583
7584 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7585 {
7586 /*
7587 * Free all verification records.
7588 */
7589 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
7590 pIemCpu->pIemEvtRecHead = NULL;
7591 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
7592 do
7593 {
7594 while (pEvtRec)
7595 {
7596 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
7597 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
7598 pIemCpu->pFreeEvtRec = pEvtRec;
7599 pEvtRec = pNext;
7600 }
7601 pEvtRec = pIemCpu->pOtherEvtRecHead;
7602 pIemCpu->pOtherEvtRecHead = NULL;
7603 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
7604 } while (pEvtRec);
7605 }
7606}
7607
7608
7609/**
7610 * Allocate an event record.
7611 * @returns Pointer to a record.
7612 */
7613static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
7614{
7615 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7616 return NULL;
7617
7618 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
7619 if (pEvtRec)
7620 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
7621 else
7622 {
7623 if (!pIemCpu->ppIemEvtRecNext)
7624 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
7625
7626 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
7627 if (!pEvtRec)
7628 return NULL;
7629 }
7630 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
7631 pEvtRec->pNext = NULL;
7632 return pEvtRec;
7633}
7634
7635
7636/**
7637 * IOMMMIORead notification.
7638 */
7639VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
7640{
7641 PVMCPU pVCpu = VMMGetCpu(pVM);
7642 if (!pVCpu)
7643 return;
7644 PIEMCPU pIemCpu = &pVCpu->iem.s;
7645 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7646 if (!pEvtRec)
7647 return;
7648 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7649 pEvtRec->u.RamRead.GCPhys = GCPhys;
7650 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
7651 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7652 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7653}
7654
7655
7656/**
7657 * IOMMMIOWrite notification.
7658 */
7659VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
7660{
7661 PVMCPU pVCpu = VMMGetCpu(pVM);
7662 if (!pVCpu)
7663 return;
7664 PIEMCPU pIemCpu = &pVCpu->iem.s;
7665 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7666 if (!pEvtRec)
7667 return;
7668 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7669 pEvtRec->u.RamWrite.GCPhys = GCPhys;
7670 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
7671 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
7672 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
7673 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
7674 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
7675 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7676 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7677}
7678
7679
7680/**
7681 * IOMIOPortRead notification.
7682 */
7683VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
7684{
7685 PVMCPU pVCpu = VMMGetCpu(pVM);
7686 if (!pVCpu)
7687 return;
7688 PIEMCPU pIemCpu = &pVCpu->iem.s;
7689 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7690 if (!pEvtRec)
7691 return;
7692 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7693 pEvtRec->u.IOPortRead.Port = Port;
7694 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7695 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7696 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7697}
7698
7699/**
7700 * IOMIOPortWrite notification.
7701 */
7702VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7703{
7704 PVMCPU pVCpu = VMMGetCpu(pVM);
7705 if (!pVCpu)
7706 return;
7707 PIEMCPU pIemCpu = &pVCpu->iem.s;
7708 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7709 if (!pEvtRec)
7710 return;
7711 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7712 pEvtRec->u.IOPortWrite.Port = Port;
7713 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7714 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7715 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7716 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7717}
7718
7719
7720VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
7721{
7722 AssertFailed();
7723}
7724
7725
7726VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
7727{
7728 AssertFailed();
7729}
7730
7731
7732/**
7733 * Fakes and records an I/O port read.
7734 *
7735 * @returns VINF_SUCCESS.
7736 * @param pIemCpu The IEM per CPU data.
7737 * @param Port The I/O port.
7738 * @param pu32Value Where to store the fake value.
7739 * @param cbValue The size of the access.
7740 */
7741static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
7742{
7743 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7744 if (pEvtRec)
7745 {
7746 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7747 pEvtRec->u.IOPortRead.Port = Port;
7748 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7749 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7750 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7751 }
7752 pIemCpu->cIOReads++;
7753 *pu32Value = 0xcccccccc;
7754 return VINF_SUCCESS;
7755}
7756
7757
7758/**
7759 * Fakes and records an I/O port write.
7760 *
7761 * @returns VINF_SUCCESS.
7762 * @param pIemCpu The IEM per CPU data.
7763 * @param Port The I/O port.
7764 * @param u32Value The value being written.
7765 * @param cbValue The size of the access.
7766 */
7767static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7768{
7769 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7770 if (pEvtRec)
7771 {
7772 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7773 pEvtRec->u.IOPortWrite.Port = Port;
7774 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7775 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7776 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7777 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7778 }
7779 pIemCpu->cIOWrites++;
7780 return VINF_SUCCESS;
7781}
7782
7783
7784/**
7785 * Used to add extra details about a stub case.
7786 * @param pIemCpu The IEM per CPU state.
7787 */
7788static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
7789{
7790 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7791 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7792 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7793 char szRegs[4096];
7794 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
7795 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
7796 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
7797 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
7798 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
7799 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
7800 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
7801 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
7802 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
7803 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
7804 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
7805 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
7806 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
7807 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
7808 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
7809 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
7810 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
7811 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
7812 " efer=%016VR{efer}\n"
7813 " pat=%016VR{pat}\n"
7814 " sf_mask=%016VR{sf_mask}\n"
7815 "krnl_gs_base=%016VR{krnl_gs_base}\n"
7816 " lstar=%016VR{lstar}\n"
7817 " star=%016VR{star} cstar=%016VR{cstar}\n"
7818 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
7819 );
7820
7821 char szInstr1[256];
7822 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
7823 DBGF_DISAS_FLAGS_DEFAULT_MODE,
7824 szInstr1, sizeof(szInstr1), NULL);
7825 char szInstr2[256];
7826 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
7827 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
7828 szInstr2, sizeof(szInstr2), NULL);
7829
7830 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
7831}
7832
7833
7834/**
7835 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
7836 * dump to the assertion info.
7837 *
7838 * @param pEvtRec The record to dump.
7839 */
7840static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
7841{
7842 switch (pEvtRec->enmEvent)
7843 {
7844 case IEMVERIFYEVENT_IOPORT_READ:
7845 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
7846 pEvtRec->u.IOPortRead.Port,
7847 pEvtRec->u.IOPortRead.cbValue);
7848 break;
7849 case IEMVERIFYEVENT_IOPORT_WRITE:
7850 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
7851 pEvtRec->u.IOPortWrite.Port,
7852 pEvtRec->u.IOPortWrite.cbValue,
7853 pEvtRec->u.IOPortWrite.u32Value);
7854 break;
7855 case IEMVERIFYEVENT_RAM_READ:
7856 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
7857 pEvtRec->u.RamRead.GCPhys,
7858 pEvtRec->u.RamRead.cb);
7859 break;
7860 case IEMVERIFYEVENT_RAM_WRITE:
7861 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
7862 pEvtRec->u.RamWrite.GCPhys,
7863 pEvtRec->u.RamWrite.cb,
7864 (int)pEvtRec->u.RamWrite.cb,
7865 pEvtRec->u.RamWrite.ab);
7866 break;
7867 default:
7868 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
7869 break;
7870 }
7871}
7872
7873
7874/**
7875 * Raises an assertion on the specified records, showing the given message with
7876 * a record dump attached.
7877 *
7878 * @param pIemCpu The IEM per CPU data.
7879 * @param pEvtRec1 The first record.
7880 * @param pEvtRec2 The second record.
7881 * @param pszMsg The message explaining why we're asserting.
7882 */
7883static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
7884{
7885 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7886 iemVerifyAssertAddRecordDump(pEvtRec1);
7887 iemVerifyAssertAddRecordDump(pEvtRec2);
7888 iemVerifyAssertMsg2(pIemCpu);
7889 RTAssertPanic();
7890}
7891
7892
7893/**
7894 * Raises an assertion on the specified record, showing the given message with
7895 * a record dump attached.
7896 *
7897 * @param pIemCpu The IEM per CPU data.
7898 * @param pEvtRec The record.
7899 * @param pszMsg The message explaining why we're asserting.
7900 */
7901static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
7902{
7903 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7904 iemVerifyAssertAddRecordDump(pEvtRec);
7905 iemVerifyAssertMsg2(pIemCpu);
7906 RTAssertPanic();
7907}
7908
7909
7910/**
7911 * Verifies a write record.
7912 *
7913 * @param pIemCpu The IEM per CPU data.
7914 * @param pEvtRec The write record.
7915 */
7916static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
7917{
7918 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
7919 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
7920 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
7921 if ( RT_FAILURE(rc)
7922 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
7923 {
7924 /* fend off ins */
7925 if ( !pIemCpu->cIOReads
7926 || pEvtRec->u.RamWrite.ab[0] != 0xcc
7927 || ( pEvtRec->u.RamWrite.cb != 1
7928 && pEvtRec->u.RamWrite.cb != 2
7929 && pEvtRec->u.RamWrite.cb != 4) )
7930 {
7931 /* fend off ROMs */
7932 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
7933 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
7934 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
7935 {
7936 /* fend off fxsave */
7937 if (pEvtRec->u.RamWrite.cb != 512)
7938 {
7939 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7940 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
7941 RTAssertMsg2Add("REM: %.*Rhxs\n"
7942 "IEM: %.*Rhxs\n",
7943 pEvtRec->u.RamWrite.cb, abBuf,
7944 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
7945 iemVerifyAssertAddRecordDump(pEvtRec);
7946 iemVerifyAssertMsg2(pIemCpu);
7947 RTAssertPanic();
7948 }
7949 }
7950 }
7951 }
7952
7953}
7954
7955/**
7956 * Performs the post-execution verification checks.
7957 */
7958static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
7959{
7960 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7961 return;
7962
7963 /*
7964 * Switch back the state.
7965 */
7966 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
7967 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
7968 Assert(pOrgCtx != pDebugCtx);
7969 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
7970
7971 /*
7972 * Execute the instruction in REM.
7973 */
7974 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7975 EMRemLock(pVM);
7976 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
7977 AssertRC(rc);
7978 EMRemUnlock(pVM);
7979
7980 /*
7981 * Compare the register states.
7982 */
7983 unsigned cDiffs = 0;
7984 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
7985 {
7986 //Log(("REM and IEM ends up with different registers!\n"));
7987
7988# define CHECK_FIELD(a_Field) \
7989 do \
7990 { \
7991 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
7992 { \
7993 switch (sizeof(pOrgCtx->a_Field)) \
7994 { \
7995 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7996 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7997 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7998 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7999 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
8000 } \
8001 cDiffs++; \
8002 } \
8003 } while (0)
8004
8005# define CHECK_BIT_FIELD(a_Field) \
8006 do \
8007 { \
8008 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
8009 { \
8010 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
8011 cDiffs++; \
8012 } \
8013 } while (0)
8014
8015# define CHECK_SEL(a_Sel) \
8016 do \
8017 { \
8018 CHECK_FIELD(a_Sel.Sel); \
8019 CHECK_FIELD(a_Sel.Attr.u); \
8020 CHECK_FIELD(a_Sel.u64Base); \
8021 CHECK_FIELD(a_Sel.u32Limit); \
8022 CHECK_FIELD(a_Sel.fFlags); \
8023 } while (0)
8024
8025#if 1 /* The recompiler doesn't update these the Intel way. */
8026 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
8027 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
8028 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
8029 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
8030 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
8031 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
8032 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
8033 pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
8034 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
8035 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
8036#endif
8037 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
8038 {
8039 RTAssertMsg2Weak(" the FPU state differs\n");
8040 cDiffs++;
8041 CHECK_FIELD(fpu.FCW);
8042 CHECK_FIELD(fpu.FSW);
8043 CHECK_FIELD(fpu.FTW);
8044 CHECK_FIELD(fpu.FOP);
8045 CHECK_FIELD(fpu.FPUIP);
8046 CHECK_FIELD(fpu.CS);
8047 CHECK_FIELD(fpu.Rsrvd1);
8048 CHECK_FIELD(fpu.FPUDP);
8049 CHECK_FIELD(fpu.DS);
8050 CHECK_FIELD(fpu.Rsrvd2);
8051 CHECK_FIELD(fpu.MXCSR);
8052 CHECK_FIELD(fpu.MXCSR_MASK);
8053 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
8054 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
8055 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
8056 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
8057 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
8058 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
8059 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
8060 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
8061 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
8062 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
8063 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
8064 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
8065 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
8066 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
8067 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
8068 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
8069 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
8070 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
8071 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
8072 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
8073 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
8074 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
8075 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
8076 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
8077 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
8078 CHECK_FIELD(fpu.au32RsrvdRest[i]);
8079 }
8080 CHECK_FIELD(rip);
8081 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
8082 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
8083 {
8084 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
8085 CHECK_BIT_FIELD(rflags.Bits.u1CF);
8086 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
8087 CHECK_BIT_FIELD(rflags.Bits.u1PF);
8088 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
8089 CHECK_BIT_FIELD(rflags.Bits.u1AF);
8090 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
8091 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
8092 CHECK_BIT_FIELD(rflags.Bits.u1SF);
8093 CHECK_BIT_FIELD(rflags.Bits.u1TF);
8094 CHECK_BIT_FIELD(rflags.Bits.u1IF);
8095 CHECK_BIT_FIELD(rflags.Bits.u1DF);
8096 CHECK_BIT_FIELD(rflags.Bits.u1OF);
8097 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
8098 CHECK_BIT_FIELD(rflags.Bits.u1NT);
8099 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
8100 CHECK_BIT_FIELD(rflags.Bits.u1RF);
8101 CHECK_BIT_FIELD(rflags.Bits.u1VM);
8102 CHECK_BIT_FIELD(rflags.Bits.u1AC);
8103 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
8104 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
8105 CHECK_BIT_FIELD(rflags.Bits.u1ID);
8106 }
8107
8108 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
8109 CHECK_FIELD(rax);
8110 CHECK_FIELD(rcx);
8111 if (!pIemCpu->fIgnoreRaxRdx)
8112 CHECK_FIELD(rdx);
8113 CHECK_FIELD(rbx);
8114 CHECK_FIELD(rsp);
8115 CHECK_FIELD(rbp);
8116 CHECK_FIELD(rsi);
8117 CHECK_FIELD(rdi);
8118 CHECK_FIELD(r8);
8119 CHECK_FIELD(r9);
8120 CHECK_FIELD(r10);
8121 CHECK_FIELD(r11);
8122 CHECK_FIELD(r12);
8123        CHECK_FIELD(r13);
        CHECK_FIELD(r14);
        CHECK_FIELD(r15);
8124 CHECK_SEL(cs);
8125 CHECK_SEL(ss);
8126 CHECK_SEL(ds);
8127 CHECK_SEL(es);
8128 CHECK_SEL(fs);
8129 CHECK_SEL(gs);
8130 CHECK_FIELD(cr0);
8131        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
8132                     the faulting instruction first: 001b:77f61ff3 66 8b 42 02   mov ax, word [edx+002h] (NT4SP1) */
8133        /* Kludge #2: CR2 differs slightly on cross-page boundary faults; we report the last address of the access
8134                     while REM reports the address of the first byte on the page.  Pending investigation as to which is correct. */
8135 if (pOrgCtx->cr2 != pDebugCtx->cr2)
8136 {
8137 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3)
8138 { /* ignore */ }
8139 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
8140 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0)
8141 { /* ignore */ }
8142 else
8143 CHECK_FIELD(cr2);
8144 }
8145 CHECK_FIELD(cr3);
8146 CHECK_FIELD(cr4);
8147 CHECK_FIELD(dr[0]);
8148 CHECK_FIELD(dr[1]);
8149 CHECK_FIELD(dr[2]);
8150 CHECK_FIELD(dr[3]);
8151 CHECK_FIELD(dr[6]);
8152 if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
8153 CHECK_FIELD(dr[7]);
8154 CHECK_FIELD(gdtr.cbGdt);
8155 CHECK_FIELD(gdtr.pGdt);
8156 CHECK_FIELD(idtr.cbIdt);
8157 CHECK_FIELD(idtr.pIdt);
8158 CHECK_SEL(ldtr);
8159 CHECK_SEL(tr);
8160 CHECK_FIELD(SysEnter.cs);
8161 CHECK_FIELD(SysEnter.eip);
8162 CHECK_FIELD(SysEnter.esp);
8163 CHECK_FIELD(msrEFER);
8164 CHECK_FIELD(msrSTAR);
8165 CHECK_FIELD(msrPAT);
8166 CHECK_FIELD(msrLSTAR);
8167 CHECK_FIELD(msrCSTAR);
8168 CHECK_FIELD(msrSFMASK);
8169 CHECK_FIELD(msrKERNELGSBASE);
8170
8171 if (cDiffs != 0)
8172 {
8173 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
8174 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
8175 iemVerifyAssertMsg2(pIemCpu);
8176 RTAssertPanic();
8177 }
8178# undef CHECK_FIELD
8179# undef CHECK_BIT_FIELD
8180 }
8181
8182 /*
8183 * If the register state compared fine, check the verification event
8184 * records.
8185 */
8186 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
8187 {
8188 /*
8189         * Compare verification event records.
8190 * - I/O port accesses should be a 1:1 match.
8191 */
8192 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
8193 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
8194 while (pIemRec && pOtherRec)
8195 {
8196            /* Since we might miss RAM writes and reads, ignore reads and, for any
8197               extra IEM write records, check that the written memory matches guest RAM. */
8198 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
8199 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
8200 && pIemRec->pNext)
8201 {
8202 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8203 iemVerifyWriteRecord(pIemCpu, pIemRec);
8204 pIemRec = pIemRec->pNext;
8205 }
8206
8207 /* Do the compare. */
8208 if (pIemRec->enmEvent != pOtherRec->enmEvent)
8209 {
8210 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
8211 break;
8212 }
8213 bool fEquals;
8214 switch (pIemRec->enmEvent)
8215 {
8216 case IEMVERIFYEVENT_IOPORT_READ:
8217 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
8218 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
8219 break;
8220 case IEMVERIFYEVENT_IOPORT_WRITE:
8221 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
8222 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
8223 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
8224 break;
8225 case IEMVERIFYEVENT_RAM_READ:
8226 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
8227 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
8228 break;
8229 case IEMVERIFYEVENT_RAM_WRITE:
8230 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
8231 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
8232 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
8233 break;
8234 default:
8235 fEquals = false;
8236 break;
8237 }
8238 if (!fEquals)
8239 {
8240 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
8241 break;
8242 }
8243
8244 /* advance */
8245 pIemRec = pIemRec->pNext;
8246 pOtherRec = pOtherRec->pNext;
8247 }
8248
8249 /* Ignore extra writes and reads. */
8250 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
8251 {
8252 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8253 iemVerifyWriteRecord(pIemCpu, pIemRec);
8254 pIemRec = pIemRec->pNext;
8255 }
8256 if (pIemRec != NULL)
8257 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
8258 else if (pOtherRec != NULL)
8259            iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
8260 }
8261 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
8262}
8263
8264#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
8265
8266/* stubs */
8267static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8268{
8269 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
8270 return VERR_INTERNAL_ERROR;
8271}
8272
8273static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8274{
8275 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
8276 return VERR_INTERNAL_ERROR;
8277}
8278
8279#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
8280
8281
8282/**
8283 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
8284 * IEMExecOneWithPrefetchedByPC.
8285 *
8286 * @return Strict VBox status code.
8287 * @param pVCpu The current virtual CPU.
8288 * @param pIemCpu The IEM per CPU data.
8289 * @param fExecuteInhibit If set, execute the instruction following CLI,
8290 * POP SS and MOV SS,GR.
8291 */
8292DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
8293{
8294 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
8295 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8296 if (rcStrict == VINF_SUCCESS)
8297 pIemCpu->cInstructions++;
8298//#ifdef DEBUG
8299// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
8300//#endif
8301
8302 /* Execute the next instruction as well if a cli, pop ss or
8303 mov ss, Gr has just completed successfully. */
8304 if ( fExecuteInhibit
8305 && rcStrict == VINF_SUCCESS
8306 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
8307 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
8308 {
8309 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
8310 if (rcStrict == VINF_SUCCESS)
8311 {
8312            IEM_OPCODE_GET_NEXT_U8(&b);
8313 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8314 if (rcStrict == VINF_SUCCESS)
8315 pIemCpu->cInstructions++;
8316 }
8317 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
8318 }
8319
8320 /*
8321 * Return value fiddling and statistics.
8322 */
8323 if (rcStrict != VINF_SUCCESS)
8324 {
8325 if (RT_SUCCESS(rcStrict))
8326 {
8327 AssertMsg(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8328 int32_t const rcPassUp = pIemCpu->rcPassUp;
8329 if (rcPassUp == VINF_SUCCESS)
8330 pIemCpu->cRetInfStatuses++;
8331 else if ( rcPassUp < VINF_EM_FIRST
8332 || rcPassUp > VINF_EM_LAST
8333 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
8334 {
8335 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8336 pIemCpu->cRetPassUpStatus++;
8337 rcStrict = rcPassUp;
8338 }
8339 else
8340 {
8341 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8342 pIemCpu->cRetInfStatuses++;
8343 }
8344 }
8345 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
8346 pIemCpu->cRetAspectNotImplemented++;
8347 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
8348 pIemCpu->cRetInstrNotImplemented++;
8349#ifdef IEM_VERIFICATION_MODE_FULL
8350 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
8351 rcStrict = VINF_SUCCESS;
8352#endif
8353 else
8354 pIemCpu->cRetErrStatuses++;
8355 }
8356 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
8357 {
8358 pIemCpu->cRetPassUpStatus++;
8359 rcStrict = pIemCpu->rcPassUp;
8360 }
8361
8362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
8363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
8364#if defined(IEM_VERIFICATION_MODE_FULL)
8365 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
8366 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
8367 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
8368 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
8369#endif
8370 return rcStrict;
8371}
8372
8373
8374/**
8375 * Execute one instruction.
8376 *
8377 * @return Strict VBox status code.
8378 * @param pVCpu The current virtual CPU.
8379 */
8380VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
8381{
8382 PIEMCPU pIemCpu = &pVCpu->iem.s;
8383
8384#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8385 iemExecVerificationModeSetup(pIemCpu);
8386#endif
8387#ifdef LOG_ENABLED
8388 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8389# ifdef IN_RING3
8390 if (LogIs2Enabled())
8391 {
8392 char szInstr[256];
8393 uint32_t cbInstr = 0;
8394 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
8395 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8396 szInstr, sizeof(szInstr), &cbInstr);
8397
8398 Log3(("**** "
8399 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8400 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
8401 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8402 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8403 " %s\n"
8404 ,
8405 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
8406 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
8407 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
8408 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
8409 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
8410 szInstr));
8411
8412 if (LogIs3Enabled())
8413 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
8414 }
8415 else
8416# endif
8417 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
8418 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
8419#endif
8420
8421 /*
8422 * Do the decoding and emulation.
8423 */
8424 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8425 if (rcStrict == VINF_SUCCESS)
8426 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8427
8428#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8429 /*
8430 * Assert some sanity.
8431 */
8432 iemExecVerificationModeCheck(pIemCpu);
8433#endif
8434 if (rcStrict != VINF_SUCCESS)
8435 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8436 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8437 return rcStrict;
8438}
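
/*
 * Illustrative sketch only (kept disabled): one way a ring-3 caller could
 * drive IEMExecOne and fall back to another execution engine when IEM does
 * not implement the current instruction yet.  The helper name and the
 * fallback policy below are assumptions for illustration, not VirtualBox API.
 */
#if 0
static VBOXSTRICTRC emR3ExampleIemStep(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
        rcStrict = VINF_EM_RESCHEDULE_REM; /* hand the instruction to REM (assumed policy). */
    return rcStrict;
}
#endif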
8439
8440
8441VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
8442{
8443 PIEMCPU pIemCpu = &pVCpu->iem.s;
8444 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8445 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8446
8447 iemInitDecoder(pIemCpu, false);
8448 uint32_t const cbOldWritten = pIemCpu->cbWritten;
8449
8450 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8451 if (rcStrict == VINF_SUCCESS)
8452 {
8453 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8454 if (pcbWritten)
8455 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
8456 }
8457 return rcStrict;
8458}
8459
8460
8461VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
8462 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
8463{
8464 PIEMCPU pIemCpu = &pVCpu->iem.s;
8465 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8466 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8467
8468 VBOXSTRICTRC rcStrict;
8469 if ( cbOpcodeBytes
8470 && pCtx->rip == OpcodeBytesPC)
8471 {
8472 iemInitDecoder(pIemCpu, false);
8473 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
8474 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
8475 rcStrict = VINF_SUCCESS;
8476 }
8477 else
8478 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8479 if (rcStrict == VINF_SUCCESS)
8480 {
8481 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8482 }
8483 return rcStrict;
8484}
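
/*
 * Illustrative sketch only (kept disabled): a caller that has already read the
 * instruction bytes at the current RIP (for instance from exit information)
 * can pass them to IEMExecOneWithPrefetchedByPC and spare IEM the opcode
 * fetch; when RIP does not match OpcodeBytesPC the bytes are ignored and
 * fetched normally.  The helper name is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleExecWithCapturedBytes(PVMCPU pVCpu, const void *pvInstr, size_t cbInstr)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip, pvInstr, cbInstr);
}
#endif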
8485
8486
8487VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
8488{
8489 PIEMCPU pIemCpu = &pVCpu->iem.s;
8490 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8491 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8492
8493 iemInitDecoder(pIemCpu, true);
8494 uint32_t const cbOldWritten = pIemCpu->cbWritten;
8495
8496 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
8497 if (rcStrict == VINF_SUCCESS)
8498 {
8499 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
8500 if (pcbWritten)
8501 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
8502 }
8503 return rcStrict;
8504}
8505
8506
8507VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
8508 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
8509{
8510 PIEMCPU pIemCpu = &pVCpu->iem.s;
8511 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8512 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8513
8514 VBOXSTRICTRC rcStrict;
8515 if ( cbOpcodeBytes
8516 && pCtx->rip == OpcodeBytesPC)
8517 {
8518 iemInitDecoder(pIemCpu, true);
8519 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
8520 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
8521 rcStrict = VINF_SUCCESS;
8522 }
8523 else
8524 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
8525 if (rcStrict == VINF_SUCCESS)
8526 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
8527 return rcStrict;
8528}
8529
8530
8531VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
8532{
8533 return IEMExecOne(pVCpu);
8534}
8535
8536
8537
8538/**
8539 * Injects a trap, fault, abort, software interrupt or external interrupt.
8540 *
8541 * The parameter list matches TRPMQueryTrapAll pretty closely.
8542 *
8543 * @returns Strict VBox status code.
8544 * @param pVCpu The current virtual CPU.
8545 * @param u8TrapNo The trap number.
8546 * @param enmType What type is it (trap/fault/abort), software
8547 * interrupt or hardware interrupt.
8548 * @param uErrCode The error code if applicable.
8549 * @param uCr2 The CR2 value if applicable.
8550 */
8551VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
8552{
8553 iemInitDecoder(&pVCpu->iem.s, false);
8554
8555 uint32_t fFlags;
8556 switch (enmType)
8557 {
8558 case TRPM_HARDWARE_INT:
8559 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
8560 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
8561 uErrCode = uCr2 = 0;
8562 break;
8563
8564 case TRPM_SOFTWARE_INT:
8565 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
8566 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
8567 uErrCode = uCr2 = 0;
8568 break;
8569
8570 case TRPM_TRAP:
8571 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
8572 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
8573 if (u8TrapNo == X86_XCPT_PF)
8574 fFlags |= IEM_XCPT_FLAGS_CR2;
8575 switch (u8TrapNo)
8576 {
8577 case X86_XCPT_DF:
8578 case X86_XCPT_TS:
8579 case X86_XCPT_NP:
8580 case X86_XCPT_SS:
8581 case X86_XCPT_PF:
8582 case X86_XCPT_AC:
8583 fFlags |= IEM_XCPT_FLAGS_ERR;
8584 break;
8585 }
8586 break;
8587
8588 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8589 }
8590
8591 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
8592}
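
/*
 * Illustrative sketch only (kept disabled): reflecting a guest page fault via
 * IEMInjectTrap.  TRPM_TRAP together with X86_XCPT_PF makes the call supply
 * both the error code and the CR2 value.  The helper name and parameter
 * origins are hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleInjectGuestPageFault(PVMCPU pVCpu, uint16_t uErrCd, RTGCPTR GCPtrFault)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCd, GCPtrFault);
}
#endif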
8593
8594
8595VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
8596{
8597 return VERR_NOT_IMPLEMENTED;
8598}
8599
8600
8601VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
8602{
8603 return VERR_NOT_IMPLEMENTED;
8604}
8605
8606
8607#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
8608/**
8609 * Executes an IRET instruction with default operand size.
8610 *
8611 * This is for PATM.
8612 *
8613 * @returns VBox status code.
8614 * @param pVCpu The current virtual CPU.
8615 * @param pCtxCore The register frame.
8616 */
8617VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
8618{
8619 PIEMCPU pIemCpu = &pVCpu->iem.s;
8620 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8621
8622 iemCtxCoreToCtx(pCtx, pCtxCore);
8623 iemInitDecoder(pIemCpu);
8624 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
8625 if (rcStrict == VINF_SUCCESS)
8626 iemCtxToCtxCore(pCtxCore, pCtx);
8627 else
8628 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8629 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8630 return rcStrict;
8631}
8632#endif
8633