VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 41975

Last change on this file since 41975 was 41939, checked in by vboxsync, 13 years ago

CPUMGetGuestCPL: Drop the context core pointer and use the Guest state in CPUMCPU via pVCpu.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 290.0 KB
1/* $Id: IEMAll.cpp 41939 2012-06-27 23:59:46Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with the speed goal, as the disassembler chews on things a bit too
37 * much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Additional exception details, basic enter/exit IEM
65 * state info.
66 * - Level 2 (Log2): ?
67 * - Level 3 (Log3): More detailed enter/exit IEM state info.
68 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
69 * - Level 5 (Log5): Decoding details.
70 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
71 *
72 */
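/* Illustrative sketch of the log levels described above; the call sites and the
 * uRip variable are hypothetical, only the Log / LogFlow / Log4 macro names and
 * the double-parenthesis convention are real:
 *
 * @code
 *     Log(("iemExample: raising #GP(0)\n"));               // level 1: major events
 *     LogFlow(("iemExample: enter rip=%RX64\n", uRip));    // flow: enter/exit info
 *     Log4(("decode: mov Gv,Ev at %RX64\n", uRip));        // level 4: mnemonics w/ EIP
 * @endcode
 */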
73
74/*******************************************************************************
75* Header Files *
76*******************************************************************************/
77#define LOG_GROUP LOG_GROUP_IEM
78#include <VBox/vmm/iem.h>
79#include <VBox/vmm/pgm.h>
80#include <internal/pgm.h>
81#include <VBox/vmm/iom.h>
82#include <VBox/vmm/em.h>
83#include <VBox/vmm/tm.h>
84#include <VBox/vmm/dbgf.h>
85#ifdef IEM_VERIFICATION_MODE
86# include <VBox/vmm/rem.h>
87# include <VBox/vmm/mm.h>
88#endif
89#include "IEMInternal.h"
90#include <VBox/vmm/vm.h>
91#include <VBox/log.h>
92#include <VBox/err.h>
93#include <VBox/param.h>
94#include <iprt/assert.h>
95#include <iprt/string.h>
96#include <iprt/x86.h>
97
98
99/*******************************************************************************
100* Structures and Typedefs *
101*******************************************************************************/
102/** @typedef PFNIEMOP
103 * Pointer to an opcode decoder function.
104 */
105
106/** @def FNIEMOP_DEF
107 * Define an opcode decoder function.
108 *
109 * We're using macros for this so that adding and removing parameters as well as
110 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL.
111 *
112 * @param a_Name The function name.
113 */
114
115
116#if defined(__GNUC__) && defined(RT_ARCH_X86)
117typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
118# define FNIEMOP_DEF(a_Name) \
119 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
120# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
121 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
122# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
123 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
124
125#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
126typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
127# define FNIEMOP_DEF(a_Name) \
128 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
129# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
130 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
131# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
132 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
133
134#elif defined(__GNUC__)
135typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
136# define FNIEMOP_DEF(a_Name) \
137 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
138# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
139 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
140# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
141 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
142
143#else
144typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
145# define FNIEMOP_DEF(a_Name) \
146 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
147# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
148 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
149# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
150 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
151
152#endif
153
154
155/**
156 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
157 */
158typedef union IEMSELDESC
159{
160 /** The legacy view. */
161 X86DESC Legacy;
162 /** The long mode view. */
163 X86DESC64 Long;
164} IEMSELDESC;
165/** Pointer to a selector descriptor table entry. */
166typedef IEMSELDESC *PIEMSELDESC;
167
168
169/*******************************************************************************
170* Defined Constants And Macros *
171*******************************************************************************/
172/** @name IEM status codes.
173 *
174 * Not quite sure how this will play out in the end, just aliasing safe status
175 * codes for now.
176 *
177 * @{ */
178#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
179/** @} */
180
181/** Temporary hack to disable the double execution. Will be removed in favor
182 * of a dedicated execution mode in EM. */
183//#define IEM_VERIFICATION_MODE_NO_REM
184
185/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
186 * due to GCC lacking knowledge about the value range of a switch. */
187#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
188
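/* Illustrative sketch: the macro above is meant to terminate a switch whose
 * cases are known to be exhaustive; the local cbMem is hypothetical:
 *
 * @code
 *     switch (pIemCpu->enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: cbMem = 2; break;
 *         case IEMMODE_32BIT: cbMem = 4; break;
 *         case IEMMODE_64BIT: cbMem = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 * @endcode
 */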
189/**
190 * Call an opcode decoder function.
191 *
192 * We're using macros for this so that adding and removing parameters can be
193 * done as we please. See FNIEMOP_DEF.
194 */
195#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
196
197/**
198 * Call a common opcode decoder function taking one extra argument.
199 *
200 * We're using macros for this so that adding and removing parameters can be
201 * done as we please. See FNIEMOP_DEF_1.
202 */
203#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
204
205/**
206 * Call a common opcode decoder function taking two extra arguments.
207 *
208 * We're using macros for this so that adding and removing parameters can be
209 * done as we please. See FNIEMOP_DEF_2.
210 */
211#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
212
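/* Illustrative sketch of how FNIEMOP_DEF and FNIEMOP_CALL pair up; the handler
 * name iemOp_example_nop is hypothetical and the body is deliberately trivial:
 *
 * @code
 *     FNIEMOP_DEF(iemOp_example_nop)
 *     {
 *         // a real handler would decode operands and advance RIP before returning
 *         return VINF_SUCCESS;
 *     }
 *
 *     // ... from a dispatcher that has pIemCpu in scope:
 *     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(iemOp_example_nop);
 * @endcode
 */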
213/**
214 * Check if we're currently executing in real or virtual 8086 mode.
215 *
216 * @returns @c true if it is, @c false if not.
217 * @param a_pIemCpu The IEM state of the current CPU.
218 */
219#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
220
221/**
222 * Check if we're currently executing in long mode.
223 *
224 * @returns @c true if it is, @c false if not.
225 * @param a_pIemCpu The IEM state of the current CPU.
226 */
227#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
228
229/**
230 * Check if we're currently executing in real mode.
231 *
232 * @returns @c true if it is, @c false if not.
233 * @param a_pIemCpu The IEM state of the current CPU.
234 */
235#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
236
237/**
238 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
239 */
240#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
241
242/**
243 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
244 */
245#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
246
247/**
248 * Tests if at least one of the specified AMD CPUID features (extended) is
249 * marked present.
250 */
251#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
252
253/**
254 * Checks if an Intel CPUID feature is present.
255 */
256#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
257 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
258 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
259
260/**
261 * Check if the address is canonical.
262 */
263#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
264
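/* Worked example for IEM_IS_CANONICAL: adding the 2^47 bias maps every canonical
 * address (bits 63:47 all equal) into [0, 2^48) and everything else outside it.
 * The sample values are illustrative only:
 *
 * @code
 *     IEM_IS_CANONICAL(UINT64_C(0x00007fffffffffff))   // true  - top of the lower half
 *     IEM_IS_CANONICAL(UINT64_C(0xffff800000000000))   // true  - bottom of the upper half
 *     IEM_IS_CANONICAL(UINT64_C(0x0000800000000000))   // false - inside the hole
 * @endcode
 */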
265
266/*******************************************************************************
267* Global Variables *
268*******************************************************************************/
269extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
270
271
272/** Function table for the ADD instruction. */
273static const IEMOPBINSIZES g_iemAImpl_add =
274{
275 iemAImpl_add_u8, iemAImpl_add_u8_locked,
276 iemAImpl_add_u16, iemAImpl_add_u16_locked,
277 iemAImpl_add_u32, iemAImpl_add_u32_locked,
278 iemAImpl_add_u64, iemAImpl_add_u64_locked
279};
280
281/** Function table for the ADC instruction. */
282static const IEMOPBINSIZES g_iemAImpl_adc =
283{
284 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
285 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
286 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
287 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
288};
289
290/** Function table for the SUB instruction. */
291static const IEMOPBINSIZES g_iemAImpl_sub =
292{
293 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
294 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
295 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
296 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
297};
298
299/** Function table for the SBB instruction. */
300static const IEMOPBINSIZES g_iemAImpl_sbb =
301{
302 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
303 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
304 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
305 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
306};
307
308/** Function table for the OR instruction. */
309static const IEMOPBINSIZES g_iemAImpl_or =
310{
311 iemAImpl_or_u8, iemAImpl_or_u8_locked,
312 iemAImpl_or_u16, iemAImpl_or_u16_locked,
313 iemAImpl_or_u32, iemAImpl_or_u32_locked,
314 iemAImpl_or_u64, iemAImpl_or_u64_locked
315};
316
317/** Function table for the XOR instruction. */
318static const IEMOPBINSIZES g_iemAImpl_xor =
319{
320 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
321 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
322 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
323 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
324};
325
326/** Function table for the AND instruction. */
327static const IEMOPBINSIZES g_iemAImpl_and =
328{
329 iemAImpl_and_u8, iemAImpl_and_u8_locked,
330 iemAImpl_and_u16, iemAImpl_and_u16_locked,
331 iemAImpl_and_u32, iemAImpl_and_u32_locked,
332 iemAImpl_and_u64, iemAImpl_and_u64_locked
333};
334
335/** Function table for the CMP instruction.
336 * @remarks Making operand order ASSUMPTIONS.
337 */
338static const IEMOPBINSIZES g_iemAImpl_cmp =
339{
340 iemAImpl_cmp_u8, NULL,
341 iemAImpl_cmp_u16, NULL,
342 iemAImpl_cmp_u32, NULL,
343 iemAImpl_cmp_u64, NULL
344};
345
346/** Function table for the TEST instruction.
347 * @remarks Making operand order ASSUMPTIONS.
348 */
349static const IEMOPBINSIZES g_iemAImpl_test =
350{
351 iemAImpl_test_u8, NULL,
352 iemAImpl_test_u16, NULL,
353 iemAImpl_test_u32, NULL,
354 iemAImpl_test_u64, NULL
355};
356
357/** Function table for the BT instruction. */
358static const IEMOPBINSIZES g_iemAImpl_bt =
359{
360 NULL, NULL,
361 iemAImpl_bt_u16, NULL,
362 iemAImpl_bt_u32, NULL,
363 iemAImpl_bt_u64, NULL
364};
365
366/** Function table for the BTC instruction. */
367static const IEMOPBINSIZES g_iemAImpl_btc =
368{
369 NULL, NULL,
370 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
371 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
372 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
373};
374
375/** Function table for the BTR instruction. */
376static const IEMOPBINSIZES g_iemAImpl_btr =
377{
378 NULL, NULL,
379 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
380 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
381 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
382};
383
384/** Function table for the BTS instruction. */
385static const IEMOPBINSIZES g_iemAImpl_bts =
386{
387 NULL, NULL,
388 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
389 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
390 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
391};
392
393/** Function table for the BSF instruction. */
394static const IEMOPBINSIZES g_iemAImpl_bsf =
395{
396 NULL, NULL,
397 iemAImpl_bsf_u16, NULL,
398 iemAImpl_bsf_u32, NULL,
399 iemAImpl_bsf_u64, NULL
400};
401
402/** Function table for the BSR instruction. */
403static const IEMOPBINSIZES g_iemAImpl_bsr =
404{
405 NULL, NULL,
406 iemAImpl_bsr_u16, NULL,
407 iemAImpl_bsr_u32, NULL,
408 iemAImpl_bsr_u64, NULL
409};
410
411/** Function table for the IMUL instruction, two-operand form. */
412static const IEMOPBINSIZES g_iemAImpl_imul_two =
413{
414 NULL, NULL,
415 iemAImpl_imul_two_u16, NULL,
416 iemAImpl_imul_two_u32, NULL,
417 iemAImpl_imul_two_u64, NULL
418};
419
420/** Group 1 /r lookup table. */
421static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
422{
423 &g_iemAImpl_add,
424 &g_iemAImpl_or,
425 &g_iemAImpl_adc,
426 &g_iemAImpl_sbb,
427 &g_iemAImpl_and,
428 &g_iemAImpl_sub,
429 &g_iemAImpl_xor,
430 &g_iemAImpl_cmp
431};
432
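/* Illustrative sketch: Group 1 opcodes (0x80..0x83) select the operation via the
 * reg field (bits 5:3) of the ModR/M byte; bRm below is a hypothetical local:
 *
 * @code
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 * @endcode
 */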
433/** Function table for the INC instruction. */
434static const IEMOPUNARYSIZES g_iemAImpl_inc =
435{
436 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
437 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
438 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
439 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
440};
441
442/** Function table for the DEC instruction. */
443static const IEMOPUNARYSIZES g_iemAImpl_dec =
444{
445 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
446 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
447 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
448 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
449};
450
451/** Function table for the NEG instruction. */
452static const IEMOPUNARYSIZES g_iemAImpl_neg =
453{
454 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
455 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
456 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
457 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
458};
459
460/** Function table for the NOT instruction. */
461static const IEMOPUNARYSIZES g_iemAImpl_not =
462{
463 iemAImpl_not_u8, iemAImpl_not_u8_locked,
464 iemAImpl_not_u16, iemAImpl_not_u16_locked,
465 iemAImpl_not_u32, iemAImpl_not_u32_locked,
466 iemAImpl_not_u64, iemAImpl_not_u64_locked
467};
468
469
470/** Function table for the ROL instruction. */
471static const IEMOPSHIFTSIZES g_iemAImpl_rol =
472{
473 iemAImpl_rol_u8,
474 iemAImpl_rol_u16,
475 iemAImpl_rol_u32,
476 iemAImpl_rol_u64
477};
478
479/** Function table for the ROR instruction. */
480static const IEMOPSHIFTSIZES g_iemAImpl_ror =
481{
482 iemAImpl_ror_u8,
483 iemAImpl_ror_u16,
484 iemAImpl_ror_u32,
485 iemAImpl_ror_u64
486};
487
488/** Function table for the RCL instruction. */
489static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
490{
491 iemAImpl_rcl_u8,
492 iemAImpl_rcl_u16,
493 iemAImpl_rcl_u32,
494 iemAImpl_rcl_u64
495};
496
497/** Function table for the RCR instruction. */
498static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
499{
500 iemAImpl_rcr_u8,
501 iemAImpl_rcr_u16,
502 iemAImpl_rcr_u32,
503 iemAImpl_rcr_u64
504};
505
506/** Function table for the SHL instruction. */
507static const IEMOPSHIFTSIZES g_iemAImpl_shl =
508{
509 iemAImpl_shl_u8,
510 iemAImpl_shl_u16,
511 iemAImpl_shl_u32,
512 iemAImpl_shl_u64
513};
514
515/** Function table for the SHR instruction. */
516static const IEMOPSHIFTSIZES g_iemAImpl_shr =
517{
518 iemAImpl_shr_u8,
519 iemAImpl_shr_u16,
520 iemAImpl_shr_u32,
521 iemAImpl_shr_u64
522};
523
524/** Function table for the SAR instruction. */
525static const IEMOPSHIFTSIZES g_iemAImpl_sar =
526{
527 iemAImpl_sar_u8,
528 iemAImpl_sar_u16,
529 iemAImpl_sar_u32,
530 iemAImpl_sar_u64
531};
532
533
534/** Function table for the MUL instruction. */
535static const IEMOPMULDIVSIZES g_iemAImpl_mul =
536{
537 iemAImpl_mul_u8,
538 iemAImpl_mul_u16,
539 iemAImpl_mul_u32,
540 iemAImpl_mul_u64
541};
542
543/** Function table for the IMUL instruction working implicitly on rAX. */
544static const IEMOPMULDIVSIZES g_iemAImpl_imul =
545{
546 iemAImpl_imul_u8,
547 iemAImpl_imul_u16,
548 iemAImpl_imul_u32,
549 iemAImpl_imul_u64
550};
551
552/** Function table for the DIV instruction. */
553static const IEMOPMULDIVSIZES g_iemAImpl_div =
554{
555 iemAImpl_div_u8,
556 iemAImpl_div_u16,
557 iemAImpl_div_u32,
558 iemAImpl_div_u64
559};
560
561/** Function table for the IDIV instruction. */
562static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
563{
564 iemAImpl_idiv_u8,
565 iemAImpl_idiv_u16,
566 iemAImpl_idiv_u32,
567 iemAImpl_idiv_u64
568};
569
570/** Function table for the SHLD instruction */
571static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
572{
573 iemAImpl_shld_u16,
574 iemAImpl_shld_u32,
575 iemAImpl_shld_u64,
576};
577
578/** Function table for the SHRD instruction */
579static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
580{
581 iemAImpl_shrd_u16,
582 iemAImpl_shrd_u32,
583 iemAImpl_shrd_u64,
584};
585
586
587/*******************************************************************************
588* Internal Functions *
589*******************************************************************************/
590static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
591/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
592static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
593static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
594static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
595static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
596static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
597static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
598static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
599static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
600static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
601static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
602static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
603static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
604static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
605static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
606static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
607static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
608static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
609static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
610static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
611static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
612static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
613static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
614
615#ifdef IEM_VERIFICATION_MODE
616static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
617#endif
618static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
619static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
620
621
622/**
623 * Initializes the decoder state.
624 *
625 * @param pIemCpu The per CPU IEM state.
626 */
627DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu)
628{
629 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
630
631 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu));
632 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
633 ? IEMMODE_64BIT
634 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
635 ? IEMMODE_32BIT
636 : IEMMODE_16BIT;
637 pIemCpu->enmCpuMode = enmMode;
638 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
639 pIemCpu->enmEffAddrMode = enmMode;
640 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
641 pIemCpu->enmEffOpSize = enmMode;
642 pIemCpu->fPrefixes = 0;
643 pIemCpu->uRexReg = 0;
644 pIemCpu->uRexB = 0;
645 pIemCpu->uRexIndex = 0;
646 pIemCpu->iEffSeg = X86_SREG_DS;
647 pIemCpu->offOpcode = 0;
648 pIemCpu->cbOpcode = 0;
649 pIemCpu->cActiveMappings = 0;
650 pIemCpu->iNextMapping = 0;
651}
652
653
654/**
655 * Prefetches opcodes the first time execution is started.
656 *
657 * @returns Strict VBox status code.
658 * @param pIemCpu The IEM state.
659 */
660static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
661{
662#ifdef IEM_VERIFICATION_MODE
663 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
664#endif
665 iemInitDecoder(pIemCpu);
666
667 /*
668 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
669 *
670 * First translate CS:rIP to a physical address.
671 */
672 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
673 uint32_t cbToTryRead;
674 RTGCPTR GCPtrPC;
675 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
676 {
677 cbToTryRead = PAGE_SIZE;
678 GCPtrPC = pCtx->rip;
679 if (!IEM_IS_CANONICAL(GCPtrPC))
680 return iemRaiseGeneralProtectionFault0(pIemCpu);
681 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
682 }
683 else
684 {
685 uint32_t GCPtrPC32 = pCtx->eip;
686 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
687 if (GCPtrPC32 > pCtx->cs.u32Limit)
688 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
689 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
690 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
691 }
692
693 RTGCPHYS GCPhys;
694 uint64_t fFlags;
695 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
696 if (RT_FAILURE(rc))
697 {
698 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
699 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
700 }
701 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
702 {
703 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
704 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
705 }
706 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
707 {
708 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
709 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
710 }
711 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
712 /** @todo Check reserved bits and such stuff. PGM is better at doing
713 * that, so do it when implementing the guest virtual address
714 * TLB... */
715
716#ifdef IEM_VERIFICATION_MODE
717 /*
718 * Optimistic optimization: Use unconsumed opcode bytes from the previous
719 * instruction.
720 */
721 /** @todo optimize this differently by not using PGMPhysRead. */
722 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
723 pIemCpu->GCPhysOpcodes = GCPhys;
724 if ( offPrevOpcodes < cbOldOpcodes
725 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
726 {
727 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
728 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
729 pIemCpu->cbOpcode = cbNew;
730 return VINF_SUCCESS;
731 }
732#endif
733
734 /*
735 * Read the bytes at this address.
736 */
737 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
738 if (cbToTryRead > cbLeftOnPage)
739 cbToTryRead = cbLeftOnPage;
740 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
741 cbToTryRead = sizeof(pIemCpu->abOpcode);
742 /** @todo patch manager */
743 if (!pIemCpu->fByPassHandlers)
744 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
745 else
746 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
747 if (rc != VINF_SUCCESS)
748 {
749 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - read error - rc=%Rrc\n", GCPtrPC, rc));
750 return rc;
751 }
752 pIemCpu->cbOpcode = cbToTryRead;
753
754 return VINF_SUCCESS;
755}
756
757
758/**
759 * Tries to fetch at least @a cbMin additional opcode bytes, raising the
760 * appropriate exception if it fails.
761 *
762 * @returns Strict VBox status code.
763 * @param pIemCpu The IEM state.
764 * @param cbMin The minimum number of additional opcode bytes needed.
765 */
766static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
767{
768 /*
769 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
770 *
771 * First translate CS:rIP to a physical address.
772 */
773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
774 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
775 uint32_t cbToTryRead;
776 RTGCPTR GCPtrNext;
777 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
778 {
779 cbToTryRead = PAGE_SIZE;
780 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
781 if (!IEM_IS_CANONICAL(GCPtrNext))
782 return iemRaiseGeneralProtectionFault0(pIemCpu);
783 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
784 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
785 }
786 else
787 {
788 uint32_t GCPtrNext32 = pCtx->eip;
789 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
790 GCPtrNext32 += pIemCpu->cbOpcode;
791 if (GCPtrNext32 > pCtx->cs.u32Limit)
792 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
793 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
794 if (cbToTryRead < cbMin - cbLeft)
795 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
796 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
797 }
798
799 RTGCPHYS GCPhys;
800 uint64_t fFlags;
801 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
802 if (RT_FAILURE(rc))
803 {
804 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
805 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
806 }
807 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
808 {
809 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
810 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
811 }
812 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
813 {
814 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
815 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
816 }
817 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
818 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
819 /** @todo Check reserved bits and such stuff. PGM is better at doing
820 * that, so do it when implementing the guest virtual address
821 * TLB... */
822
823 /*
824 * Read the bytes at this address.
825 */
826 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
827 if (cbToTryRead > cbLeftOnPage)
828 cbToTryRead = cbLeftOnPage;
829 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
830 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
831 Assert(cbToTryRead >= cbMin - cbLeft);
832 if (!pIemCpu->fByPassHandlers)
833 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
834 else
835 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
836 if (rc != VINF_SUCCESS)
837 {
838 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc\n", GCPtrNext, rc));
839 return rc;
840 }
841 pIemCpu->cbOpcode += cbToTryRead;
842 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
843
844 return VINF_SUCCESS;
845}
846
847
848/**
849 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
850 *
851 * @returns Strict VBox status code.
852 * @param pIemCpu The IEM state.
853 * @param pb Where to return the opcode byte.
854 */
855DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
856{
857 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
858 if (rcStrict == VINF_SUCCESS)
859 {
860 uint8_t offOpcode = pIemCpu->offOpcode;
861 *pb = pIemCpu->abOpcode[offOpcode];
862 pIemCpu->offOpcode = offOpcode + 1;
863 }
864 else
865 *pb = 0;
866 return rcStrict;
867}
868
869
870/**
871 * Fetches the next opcode byte.
872 *
873 * @returns Strict VBox status code.
874 * @param pIemCpu The IEM state.
875 * @param pu8 Where to return the opcode byte.
876 */
877DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
878{
879 uint8_t const offOpcode = pIemCpu->offOpcode;
880 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
881 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
882
883 *pu8 = pIemCpu->abOpcode[offOpcode];
884 pIemCpu->offOpcode = offOpcode + 1;
885 return VINF_SUCCESS;
886}
887
888
889/**
890 * Fetches the next opcode byte, returns automatically on failure.
891 *
892 * @param a_pu8 Where to return the opcode byte.
893 * @remark Implicitly references pIemCpu.
894 */
895#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
896 do \
897 { \
898 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
899 if (rcStrict2 != VINF_SUCCESS) \
900 return rcStrict2; \
901 } while (0)
902
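/* Illustrative sketch: the IEM_OPCODE_GET_NEXT_* macros are used from functions
 * defined with FNIEMOP_DEF, where pIemCpu is implicitly in scope. The handler
 * name and the decoding shown are hypothetical and deliberately incomplete:
 *
 * @code
 *     FNIEMOP_DEF(iemOp_example_grp1_Eb_Ib)
 *     {
 *         uint8_t bRm;   IEM_OPCODE_GET_NEXT_U8(&bRm);    // returns on fetch failure
 *         uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
 *         // ... decode bRm, apply the immediate, advance RIP ...
 *         return VINF_SUCCESS;
 *     }
 * @endcode
 */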
903
904/**
905 * Fetches the next signed byte from the opcode stream.
906 *
907 * @returns Strict VBox status code.
908 * @param pIemCpu The IEM state.
909 * @param pi8 Where to return the signed byte.
910 */
911DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
912{
913 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
914}
915
916
917/**
918 * Fetches the next signed byte from the opcode stream, returning automatically
919 * on failure.
920 *
921 * @param a_pi8 Where to return the signed byte.
922 * @remark Implicitly references pIemCpu.
923 */
924#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
925 do \
926 { \
927 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
928 if (rcStrict2 != VINF_SUCCESS) \
929 return rcStrict2; \
930 } while (0)
931
932
933/**
934 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
935 *
936 * @returns Strict VBox status code.
937 * @param pIemCpu The IEM state.
938 * @param pu16 Where to return the sign-extended word.
939 */
940DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
941{
942 uint8_t u8;
943 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
944 if (rcStrict == VINF_SUCCESS)
945 *pu16 = (int8_t)u8;
946 return rcStrict;
947}
948
949
950/**
951 * Fetches the next signed byte from the opcode stream, sign extending it to
952 * an unsigned 16-bit value.
953 *
954 * @returns Strict VBox status code.
955 * @param pIemCpu The IEM state.
956 * @param pu16 Where to return the unsigned word.
957 */
958DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
959{
960 uint8_t const offOpcode = pIemCpu->offOpcode;
961 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
962 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
963
964 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
965 pIemCpu->offOpcode = offOpcode + 1;
966 return VINF_SUCCESS;
967}
968
969
970/**
971 * Fetches the next signed byte from the opcode stream and sign-extends it to
972 * a word, returning automatically on failure.
973 *
974 * @param a_pu16 Where to return the word.
975 * @remark Implicitly references pIemCpu.
976 */
977#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
978 do \
979 { \
980 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
981 if (rcStrict2 != VINF_SUCCESS) \
982 return rcStrict2; \
983 } while (0)
984
985
986/**
987 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
988 *
989 * @returns Strict VBox status code.
990 * @param pIemCpu The IEM state.
991 * @param pu16 Where to return the opcode word.
992 */
993DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
994{
995 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
996 if (rcStrict == VINF_SUCCESS)
997 {
998 uint8_t offOpcode = pIemCpu->offOpcode;
999 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1000 pIemCpu->offOpcode = offOpcode + 2;
1001 }
1002 else
1003 *pu16 = 0;
1004 return rcStrict;
1005}
1006
1007
1008/**
1009 * Fetches the next opcode word.
1010 *
1011 * @returns Strict VBox status code.
1012 * @param pIemCpu The IEM state.
1013 * @param pu16 Where to return the opcode word.
1014 */
1015DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1016{
1017 uint8_t const offOpcode = pIemCpu->offOpcode;
1018 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1019 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1020
1021 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1022 pIemCpu->offOpcode = offOpcode + 2;
1023 return VINF_SUCCESS;
1024}
1025
1026
1027/**
1028 * Fetches the next opcode word, returns automatically on failure.
1029 *
1030 * @param a_pu16 Where to return the opcode word.
1031 * @remark Implicitly references pIemCpu.
1032 */
1033#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1034 do \
1035 { \
1036 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1037 if (rcStrict2 != VINF_SUCCESS) \
1038 return rcStrict2; \
1039 } while (0)
1040
1041
1042/**
1043 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1044 *
1045 * @returns Strict VBox status code.
1046 * @param pIemCpu The IEM state.
1047 * @param pu32 Where to return the opcode double word.
1048 */
1049DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1050{
1051 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1052 if (rcStrict == VINF_SUCCESS)
1053 {
1054 uint8_t offOpcode = pIemCpu->offOpcode;
1055 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1056 pIemCpu->offOpcode = offOpcode + 2;
1057 }
1058 else
1059 *pu32 = 0;
1060 return rcStrict;
1061}
1062
1063
1064/**
1065 * Fetches the next opcode word, zero extending it to a double word.
1066 *
1067 * @returns Strict VBox status code.
1068 * @param pIemCpu The IEM state.
1069 * @param pu32 Where to return the opcode double word.
1070 */
1071DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1072{
1073 uint8_t const offOpcode = pIemCpu->offOpcode;
1074 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1075 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1076
1077 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1078 pIemCpu->offOpcode = offOpcode + 2;
1079 return VINF_SUCCESS;
1080}
1081
1082
1083/**
1084 * Fetches the next opcode word and zero extends it to a double word, returns
1085 * automatically on failure.
1086 *
1087 * @param a_pu32 Where to return the opcode double word.
1088 * @remark Implicitly references pIemCpu.
1089 */
1090#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1091 do \
1092 { \
1093 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1094 if (rcStrict2 != VINF_SUCCESS) \
1095 return rcStrict2; \
1096 } while (0)
1097
1098
1099/**
1100 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1101 *
1102 * @returns Strict VBox status code.
1103 * @param pIemCpu The IEM state.
1104 * @param pu64 Where to return the opcode quad word.
1105 */
1106DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1107{
1108 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1109 if (rcStrict == VINF_SUCCESS)
1110 {
1111 uint8_t offOpcode = pIemCpu->offOpcode;
1112 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1113 pIemCpu->offOpcode = offOpcode + 2;
1114 }
1115 else
1116 *pu64 = 0;
1117 return rcStrict;
1118}
1119
1120
1121/**
1122 * Fetches the next opcode word, zero extending it to a quad word.
1123 *
1124 * @returns Strict VBox status code.
1125 * @param pIemCpu The IEM state.
1126 * @param pu64 Where to return the opcode quad word.
1127 */
1128DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1129{
1130 uint8_t const offOpcode = pIemCpu->offOpcode;
1131 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1132 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1133
1134 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1135 pIemCpu->offOpcode = offOpcode + 2;
1136 return VINF_SUCCESS;
1137}
1138
1139
1140/**
1141 * Fetches the next opcode word and zero extends it to a quad word, returns
1142 * automatically on failure.
1143 *
1144 * @param a_pu64 Where to return the opcode quad word.
1145 * @remark Implicitly references pIemCpu.
1146 */
1147#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1148 do \
1149 { \
1150 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1151 if (rcStrict2 != VINF_SUCCESS) \
1152 return rcStrict2; \
1153 } while (0)
1154
1155
1156/**
1157 * Fetches the next signed word from the opcode stream.
1158 *
1159 * @returns Strict VBox status code.
1160 * @param pIemCpu The IEM state.
1161 * @param pi16 Where to return the signed word.
1162 */
1163DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1164{
1165 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1166}
1167
1168
1169/**
1170 * Fetches the next signed word from the opcode stream, returning automatically
1171 * on failure.
1172 *
1173 * @param a_pi16 Where to return the signed word.
1174 * @remark Implicitly references pIemCpu.
1175 */
1176#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1177 do \
1178 { \
1179 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1180 if (rcStrict2 != VINF_SUCCESS) \
1181 return rcStrict2; \
1182 } while (0)
1183
1184
1185/**
1186 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1187 *
1188 * @returns Strict VBox status code.
1189 * @param pIemCpu The IEM state.
1190 * @param pu32 Where to return the opcode dword.
1191 */
1192DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1193{
1194 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1195 if (rcStrict == VINF_SUCCESS)
1196 {
1197 uint8_t offOpcode = pIemCpu->offOpcode;
1198 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1199 pIemCpu->abOpcode[offOpcode + 1],
1200 pIemCpu->abOpcode[offOpcode + 2],
1201 pIemCpu->abOpcode[offOpcode + 3]);
1202 pIemCpu->offOpcode = offOpcode + 4;
1203 }
1204 else
1205 *pu32 = 0;
1206 return rcStrict;
1207}
1208
1209
1210/**
1211 * Fetches the next opcode dword.
1212 *
1213 * @returns Strict VBox status code.
1214 * @param pIemCpu The IEM state.
1215 * @param pu32 Where to return the opcode double word.
1216 */
1217DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1218{
1219 uint8_t const offOpcode = pIemCpu->offOpcode;
1220 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1221 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1222
1223 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1224 pIemCpu->abOpcode[offOpcode + 1],
1225 pIemCpu->abOpcode[offOpcode + 2],
1226 pIemCpu->abOpcode[offOpcode + 3]);
1227 pIemCpu->offOpcode = offOpcode + 4;
1228 return VINF_SUCCESS;
1229}
1230
1231
1232/**
1233 * Fetches the next opcode dword, returns automatically on failure.
1234 *
1235 * @param a_pu32 Where to return the opcode dword.
1236 * @remark Implicitly references pIemCpu.
1237 */
1238#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1239 do \
1240 { \
1241 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1242 if (rcStrict2 != VINF_SUCCESS) \
1243 return rcStrict2; \
1244 } while (0)
1245
1246
1247/**
1248 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1249 *
1250 * @returns Strict VBox status code.
1251 * @param pIemCpu The IEM state.
1252 * @param pu64 Where to return the opcode quad word.
1253 */
1254DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1255{
1256 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1257 if (rcStrict == VINF_SUCCESS)
1258 {
1259 uint8_t offOpcode = pIemCpu->offOpcode;
1260 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1261 pIemCpu->abOpcode[offOpcode + 1],
1262 pIemCpu->abOpcode[offOpcode + 2],
1263 pIemCpu->abOpcode[offOpcode + 3]);
1264 pIemCpu->offOpcode = offOpcode + 4;
1265 }
1266 else
1267 *pu64 = 0;
1268 return rcStrict;
1269}
1270
1271
1272/**
1273 * Fetches the next opcode dword, zero extending it to a quad word.
1274 *
1275 * @returns Strict VBox status code.
1276 * @param pIemCpu The IEM state.
1277 * @param pu64 Where to return the opcode quad word.
1278 */
1279DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1280{
1281 uint8_t const offOpcode = pIemCpu->offOpcode;
1282 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1283 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1284
1285 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1286 pIemCpu->abOpcode[offOpcode + 1],
1287 pIemCpu->abOpcode[offOpcode + 2],
1288 pIemCpu->abOpcode[offOpcode + 3]);
1289 pIemCpu->offOpcode = offOpcode + 4;
1290 return VINF_SUCCESS;
1291}
1292
1293
1294/**
1295 * Fetches the next opcode dword and zero extends it to a quad word, returns
1296 * automatically on failure.
1297 *
1298 * @param a_pu64 Where to return the opcode quad word.
1299 * @remark Implicitly references pIemCpu.
1300 */
1301#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1302 do \
1303 { \
1304 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1305 if (rcStrict2 != VINF_SUCCESS) \
1306 return rcStrict2; \
1307 } while (0)
1308
1309
1310/**
1311 * Fetches the next signed double word from the opcode stream.
1312 *
1313 * @returns Strict VBox status code.
1314 * @param pIemCpu The IEM state.
1315 * @param pi32 Where to return the signed double word.
1316 */
1317DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1318{
1319 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1320}
1321
1322/**
1323 * Fetches the next signed double word from the opcode stream, returning
1324 * automatically on failure.
1325 *
1326 * @param a_pi32 Where to return the signed double word.
1327 * @remark Implicitly references pIemCpu.
1328 */
1329#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1330 do \
1331 { \
1332 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1333 if (rcStrict2 != VINF_SUCCESS) \
1334 return rcStrict2; \
1335 } while (0)
1336
1337
1338/**
1339 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1340 *
1341 * @returns Strict VBox status code.
1342 * @param pIemCpu The IEM state.
1343 * @param pu64 Where to return the opcode qword.
1344 */
1345DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1346{
1347 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1348 if (rcStrict == VINF_SUCCESS)
1349 {
1350 uint8_t offOpcode = pIemCpu->offOpcode;
1351 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1352 pIemCpu->abOpcode[offOpcode + 1],
1353 pIemCpu->abOpcode[offOpcode + 2],
1354 pIemCpu->abOpcode[offOpcode + 3]);
1355 pIemCpu->offOpcode = offOpcode + 4;
1356 }
1357 else
1358 *pu64 = 0;
1359 return rcStrict;
1360}
1361
1362
1363/**
1364 * Fetches the next opcode dword, sign extending it into a quad word.
1365 *
1366 * @returns Strict VBox status code.
1367 * @param pIemCpu The IEM state.
1368 * @param pu64 Where to return the opcode quad word.
1369 */
1370DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1371{
1372 uint8_t const offOpcode = pIemCpu->offOpcode;
1373 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1374 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1375
1376 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1377 pIemCpu->abOpcode[offOpcode + 1],
1378 pIemCpu->abOpcode[offOpcode + 2],
1379 pIemCpu->abOpcode[offOpcode + 3]);
1380 *pu64 = i32;
1381 pIemCpu->offOpcode = offOpcode + 4;
1382 return VINF_SUCCESS;
1383}
1384
1385
1386/**
1387 * Fetches the next opcode double word and sign extends it to a quad word,
1388 * returns automatically on failure.
1389 *
1390 * @param a_pu64 Where to return the opcode quad word.
1391 * @remark Implicitly references pIemCpu.
1392 */
1393#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1394 do \
1395 { \
1396 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1397 if (rcStrict2 != VINF_SUCCESS) \
1398 return rcStrict2; \
1399 } while (0)
1400
1401
1402/**
1403 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1404 *
1405 * @returns Strict VBox status code.
1406 * @param pIemCpu The IEM state.
1407 * @param pu64 Where to return the opcode qword.
1408 */
1409DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1410{
1411 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1412 if (rcStrict == VINF_SUCCESS)
1413 {
1414 uint8_t offOpcode = pIemCpu->offOpcode;
1415 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1416 pIemCpu->abOpcode[offOpcode + 1],
1417 pIemCpu->abOpcode[offOpcode + 2],
1418 pIemCpu->abOpcode[offOpcode + 3],
1419 pIemCpu->abOpcode[offOpcode + 4],
1420 pIemCpu->abOpcode[offOpcode + 5],
1421 pIemCpu->abOpcode[offOpcode + 6],
1422 pIemCpu->abOpcode[offOpcode + 7]);
1423 pIemCpu->offOpcode = offOpcode + 8;
1424 }
1425 else
1426 *pu64 = 0;
1427 return rcStrict;
1428}
1429
1430
1431/**
1432 * Fetches the next opcode qword.
1433 *
1434 * @returns Strict VBox status code.
1435 * @param pIemCpu The IEM state.
1436 * @param pu64 Where to return the opcode qword.
1437 */
1438DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1439{
1440 uint8_t const offOpcode = pIemCpu->offOpcode;
1441 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1442 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1443
1444 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1445 pIemCpu->abOpcode[offOpcode + 1],
1446 pIemCpu->abOpcode[offOpcode + 2],
1447 pIemCpu->abOpcode[offOpcode + 3],
1448 pIemCpu->abOpcode[offOpcode + 4],
1449 pIemCpu->abOpcode[offOpcode + 5],
1450 pIemCpu->abOpcode[offOpcode + 6],
1451 pIemCpu->abOpcode[offOpcode + 7]);
1452 pIemCpu->offOpcode = offOpcode + 8;
1453 return VINF_SUCCESS;
1454}
1455
1456
1457/**
1458 * Fetches the next opcode quad word, returns automatically on failure.
1459 *
1460 * @param a_pu64 Where to return the opcode quad word.
1461 * @remark Implicitly references pIemCpu.
1462 */
1463#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1464 do \
1465 { \
1466 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1467 if (rcStrict2 != VINF_SUCCESS) \
1468 return rcStrict2; \
1469 } while (0)
1470
1471
1472/** @name Misc Worker Functions.
1473 * @{
1474 */
1475
1476
1477/**
1478 * Validates a new SS segment.
1479 *
1480 * @returns VBox strict status code.
1481 * @param pIemCpu The IEM per CPU instance data.
1482 * @param pCtx The CPU context.
1483 * @param NewSS The new SS selector.
1484 * @param uCpl The CPL to load the stack for.
1485 * @param pDesc Where to return the descriptor.
1486 */
1487static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1488{
1489 NOREF(pCtx);
1490
1491 /* Null selectors are not allowed (we're not called for dispatching
1492 interrupts with SS=0 in long mode). */
1493 if (!(NewSS & (X86_SEL_MASK | X86_SEL_LDT)))
1494 {
1495 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1496 return iemRaiseGeneralProtectionFault0(pIemCpu);
1497 }
1498
1499 /*
1500 * Read the descriptor.
1501 */
1502 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1503 if (rcStrict != VINF_SUCCESS)
1504 return rcStrict;
1505
1506 /*
1507 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1508 */
1509 if (!pDesc->Legacy.Gen.u1DescType)
1510 {
1511 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1512 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1513 }
1514
1515 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1516 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1517 {
1518 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1519 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1520 }
1521 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1522 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1523 {
1524 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1525 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1526 }
1527 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1528 if ((NewSS & X86_SEL_RPL) != uCpl)
1529 {
1530 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1531 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1532 }
1533 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1534 {
1535 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1536 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1537 }
1538
1539 /* Is it there? */
1540 /** @todo testcase: Is this checked before the canonical / limit check below? */
1541 if (!pDesc->Legacy.Gen.u1Present)
1542 {
1543 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1544 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1545 }
1546
1547 return VINF_SUCCESS;
1548}
1549
1550
1551/** @} */
1552
1553/** @name Raising Exceptions.
1554 *
1555 * @{
1556 */
1557
1558/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1559 * @{ */
1560/** CPU exception. */
1561#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1562/** External interrupt (from PIC, APIC, whatever). */
1563#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1564/** Software interrupt (int, into or bound). */
1565#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1566/** Takes an error code. */
1567#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1568/** Takes a CR2. */
1569#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1570/** Generated by the breakpoint instruction. */
1571#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1572/** @} */
1573
1574/**
1575 * Loads the specified stack far pointer from the TSS.
1576 *
1577 * @returns VBox strict status code.
1578 * @param pIemCpu The IEM per CPU instance data.
1579 * @param pCtx The CPU context.
1580 * @param uCpl The CPL to load the stack for.
1581 * @param pSelSS Where to return the new stack segment.
1582 * @param puEsp Where to return the new stack pointer.
1583 */
1584static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1585 PRTSEL pSelSS, uint32_t *puEsp)
1586{
1587 VBOXSTRICTRC rcStrict;
1588 Assert(uCpl < 4);
1589 *puEsp = 0; /* make gcc happy */
1590 *pSelSS = 0; /* make gcc happy */
1591
1592 switch (pCtx->tr.Attr.n.u4Type)
1593 {
1594 /*
1595 * 16-bit TSS (X86TSS16).
1596 */
1597 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1598 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1599 {
1600 uint32_t off = uCpl * 4 + 2;
1601 if (off + 4 > pCtx->tr.u32Limit)
1602 {
1603 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
1604 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1605 }
1606
1607 uint32_t u32Tmp = 0; /* gcc maybe... */
1608 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1609 if (rcStrict == VINF_SUCCESS)
1610 {
1611 *puEsp = RT_LOWORD(u32Tmp);
1612 *pSelSS = RT_HIWORD(u32Tmp);
1613 return VINF_SUCCESS;
1614 }
1615 break;
1616 }
1617
1618 /*
1619 * 32-bit TSS (X86TSS32).
1620 */
1621 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1622 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1623 {
1624 uint32_t off = uCpl * 8 + 4;
1625 if (off + 7 > pCtx->tr.u32Limit)
1626 {
1627 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
1628 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1629 }
1630
1631 uint64_t u64Tmp;
1632 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1633 if (rcStrict == VINF_SUCCESS)
1634 {
1635 *puEsp = u64Tmp & UINT32_MAX;
1636 *pSelSS = (RTSEL)(u64Tmp >> 32);
1637 return VINF_SUCCESS;
1638 }
1639 break;
1640 }
1641
1642 default:
1643 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1644 }
1645 return rcStrict;
1646}
1647
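/* Worked example for the TSS offsets used above: in a 16-bit TSS the ring-N
 * stack fields are the word pairs {SPn, SSn} starting at offset 2, so SPn sits
 * at 2 + n*4; in a 32-bit TSS they are the pairs {ESPn, SSn} starting at offset
 * 4, so ESPn sits at 4 + n*8. For uCpl == 1 the code therefore reads offset 6
 * (SP1/SS1) of a 16-bit TSS and offset 12 (ESP1/SS1) of a 32-bit TSS. */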
1648
1649/**
1650 * Adjust the CPU state according to the exception being raised.
1651 *
1652 * @param pCtx The CPU context.
1653 * @param u8Vector The exception that has been raised.
1654 */
1655DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1656{
1657 switch (u8Vector)
1658 {
1659 case X86_XCPT_DB:
1660 pCtx->dr[7] &= ~X86_DR7_GD;
1661 break;
1662 /** @todo Read the AMD and Intel exception reference... */
1663 }
1664}
1665
1666
1667/**
1668 * Implements exceptions and interrupts for real mode.
1669 *
1670 * @returns VBox strict status code.
1671 * @param pIemCpu The IEM per CPU instance data.
1672 * @param pCtx The CPU context.
1673 * @param cbInstr The number of bytes to offset rIP by in the return
1674 * address.
1675 * @param u8Vector The interrupt / exception vector number.
1676 * @param fFlags The flags.
1677 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1678 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1679 */
1680static VBOXSTRICTRC
1681iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1682 PCPUMCTX pCtx,
1683 uint8_t cbInstr,
1684 uint8_t u8Vector,
1685 uint32_t fFlags,
1686 uint16_t uErr,
1687 uint64_t uCr2)
1688{
1689 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1690 NOREF(uErr); NOREF(uCr2);
1691
1692 /*
1693 * Read the IDT entry.
1694 */
1695 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1696 {
1697 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1698 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1699 }
1700 RTFAR16 Idte;
1701 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1702 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1703 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1704 return rcStrict;
1705
1706 /*
1707 * Push the stack frame.
1708 */
1709 uint16_t *pu16Frame;
1710 uint64_t uNewRsp;
1711 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1712 if (rcStrict != VINF_SUCCESS)
1713 return rcStrict;
1714
1715 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
1716 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
1717 pu16Frame[0] = pCtx->ip + cbInstr;
1718 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1719 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1720 return rcStrict;
1721
1722 /*
1723 * Load the vector address into cs:ip and make exception specific state
1724 * adjustments.
1725 */
1726 pCtx->cs.Sel = Idte.sel;
1727 pCtx->cs.ValidSel = Idte.sel;
1728 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1729 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
1730 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1731 pCtx->rip = Idte.off;
1732 pCtx->eflags.Bits.u1IF = 0;
1733
1734 /** @todo do we actually do this in real mode? */
1735 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1736 iemRaiseXcptAdjustState(pCtx, u8Vector);
1737
1738 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1739}
1740
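/*
 * Illustrative sketch of what the real mode path above amounts to; read16 and
 * push16 are hypothetical helpers used only for this sketch:
 *
 *      uint16_t offIp = read16(idtr.pIdt + u8Vector * 4);      // IVT entry: offset
 *      uint16_t selCs = read16(idtr.pIdt + u8Vector * 4 + 2);  // IVT entry: segment
 *      push16(FLAGS); push16(CS); push16(IP + cbInstr);        // 6 byte return frame
 *      CS:IP = selCs:offIp; IF = 0;
 */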
1741
1742/**
1743 * Implements exceptions and interrupts for protected mode.
1744 *
1745 * @returns VBox strict status code.
1746 * @param pIemCpu The IEM per CPU instance data.
1747 * @param pCtx The CPU context.
1748 * @param cbInstr The number of bytes to offset rIP by in the return
1749 * address.
1750 * @param u8Vector The interrupt / exception vector number.
1751 * @param fFlags The flags.
1752 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1753 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1754 */
1755static VBOXSTRICTRC
1756iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
1757 PCPUMCTX pCtx,
1758 uint8_t cbInstr,
1759 uint8_t u8Vector,
1760 uint32_t fFlags,
1761 uint16_t uErr,
1762 uint64_t uCr2)
1763{
1764 NOREF(cbInstr);
1765
1766 /*
1767 * Read the IDT entry.
1768 */
1769 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1770 {
1771 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1772 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1773 }
1774 X86DESC Idte;
1775 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
1776 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
1777 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1778 return rcStrict;
1779 LogFlow(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
1780 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
1781 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
1782
1783 /*
1784 * Check the descriptor type, DPL and such.
1785 * ASSUMES this is done in the same order as described for call-gate calls.
1786 */
1787 if (Idte.Gate.u1DescType)
1788 {
1789 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1790 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1791 }
1792 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
1793 switch (Idte.Gate.u4Type)
1794 {
1795 case X86_SEL_TYPE_SYS_UNDEFINED:
1796 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1797 case X86_SEL_TYPE_SYS_LDT:
1798 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1799 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1800 case X86_SEL_TYPE_SYS_UNDEFINED2:
1801 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1802 case X86_SEL_TYPE_SYS_UNDEFINED3:
1803 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1804 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1805 case X86_SEL_TYPE_SYS_UNDEFINED4:
1806 {
1807 /** @todo check what actually happens when the type is wrong...
1808 * esp. call gates. */
1809 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1810 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1811 }
1812
1813 case X86_SEL_TYPE_SYS_286_INT_GATE:
1814 case X86_SEL_TYPE_SYS_386_INT_GATE:
1815 fEflToClear |= X86_EFL_IF;
1816 break;
1817
1818 case X86_SEL_TYPE_SYS_TASK_GATE:
1819 /** @todo task gates. */
1820 AssertFailedReturn(VERR_NOT_SUPPORTED);
1821
1822 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1823 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1824 break;
1825
1826 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1827 }
1828
1829 /* Check DPL against CPL if applicable. */
1830 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1831 {
1832 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
1833 {
1834 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
1835 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1836 }
1837 }
1838
1839 /* Is it there? */
1840 if (!Idte.Gate.u1Present)
1841 {
1842 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
1843 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1844 }
1845
1846 /* A null CS is bad. */
1847 RTSEL NewCS = Idte.Gate.u16Sel;
1848 if (!(NewCS & (X86_SEL_MASK | X86_SEL_LDT)))
1849 {
1850 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
1851 return iemRaiseGeneralProtectionFault0(pIemCpu);
1852 }
1853
1854 /* Fetch the descriptor for the new CS. */
1855 IEMSELDESC DescCS;
1856 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
1857 if (rcStrict != VINF_SUCCESS)
1858 {
1859 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
1860 return rcStrict;
1861 }
1862
1863 /* Must be a code segment. */
1864 if (!DescCS.Legacy.Gen.u1DescType)
1865 {
1866 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1867 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1868 }
1869 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1870 {
1871 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1872 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1873 }
1874
1875 /* Don't allow lowering the privilege level. */
1876 /** @todo Does the lowering of privileges apply to software interrupts
1877 * only? This has bearings on the more-privileged or
1878 * same-privilege stack behavior further down. A testcase would
1879 * be nice. */
1880 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1881 {
1882 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1883 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1884 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1885 }
1886 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
1887
1888 /* Check the new EIP against the new CS limit. */
1889 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
1890 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
1891 ? Idte.Gate.u16OffsetLow
1892 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
1893 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
1894 if (DescCS.Legacy.Gen.u1Granularity)
1895 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1896 if (uNewEip > cbLimitCS)
1897 {
1898 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - new EIP (%#x) > CS limit (%#x) -> #GP\n",
1899 u8Vector, NewCS, uNewEip, cbLimitCS));
1900 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1901 }
1902
1903 /* Make sure the selector is present. */
1904 if (!DescCS.Legacy.Gen.u1Present)
1905 {
1906 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
1907 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
1908 }
1909
1910 /*
1911 * If the privilege level changes, we need to get a new stack from the TSS.
1912 * This in turns means validating the new SS and ESP...
1913 */
1914 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
1915 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
1916 if (uNewCpl != pIemCpu->uCpl)
1917 {
1918 RTSEL NewSS;
1919 uint32_t uNewEsp;
1920 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
1921 if (rcStrict != VINF_SUCCESS)
1922 return rcStrict;
1923
1924 IEMSELDESC DescSS;
1925 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
1926 if (rcStrict != VINF_SUCCESS)
1927 return rcStrict;
1928
1929 /* Check that there is sufficient space for the stack frame. */
1930 uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
1931 if (DescSS.Legacy.Gen.u1Granularity)
1932 cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1933 AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1934
1935 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
1936 if ( uNewEsp - 1 > cbLimitSS
1937 || uNewEsp < cbStackFrame)
1938 {
1939 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
1940 u8Vector, NewSS, uNewEsp, cbStackFrame));
1941 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
1942 }
1943
1944 /*
1945 * Start making changes.
1946 */
1947
1948 /* Create the stack frame. */
1949 RTPTRUNION uStackFrame;
1950 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
1951 uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
1952 if (rcStrict != VINF_SUCCESS)
1953 return rcStrict;
1954 void * const pvStackFrame = uStackFrame.pv;
1955
1956 if (fFlags & IEM_XCPT_FLAGS_ERR)
1957 *uStackFrame.pu32++ = uErr;
1958 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1959 ? pCtx->eip + cbInstr : pCtx->eip;
1960 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
1961 uStackFrame.pu32[2] = pCtx->eflags.u;
1962 uStackFrame.pu32[3] = pCtx->esp;
1963 uStackFrame.pu32[4] = pCtx->ss.Sel;
1964 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
1965 if (rcStrict != VINF_SUCCESS)
1966 return rcStrict;
1967
1968 /* Mark the selectors 'accessed' (hope this is the correct time). */
1969 /** @todo testcase: exactly _when_ are the accessed bits set - before or
1970 * after pushing the stack frame? (Write protect the gdt + stack to
1971 * find out.) */
1972 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1973 {
1974 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
1975 if (rcStrict != VINF_SUCCESS)
1976 return rcStrict;
1977 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1978 }
1979
1980 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1981 {
1982 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
1983 if (rcStrict != VINF_SUCCESS)
1984 return rcStrict;
1985 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1986 }
1987
1988 /*
1989 * Start committing the register changes (joins with the DPL=CPL branch).
1990 */
1991 pCtx->ss.Sel = NewSS;
1992 pCtx->ss.ValidSel = NewSS;
1993 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1994 pCtx->ss.u32Limit = cbLimitSS;
1995 pCtx->ss.u64Base = X86DESC_BASE(DescSS.Legacy);
1996 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
1997 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
1998 pIemCpu->uCpl = uNewCpl;
1999 }
2000 /*
2001 * Same privilege, no stack change and smaller stack frame.
2002 */
2003 else
2004 {
2005 uint64_t uNewRsp;
2006 RTPTRUNION uStackFrame;
2007 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
2008 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2009 if (rcStrict != VINF_SUCCESS)
2010 return rcStrict;
2011 void * const pvStackFrame = uStackFrame.pv;
2012
2013 if (fFlags & IEM_XCPT_FLAGS_ERR)
2014 *uStackFrame.pu32++ = uErr;
2015 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2016 ? pCtx->eip + cbInstr : pCtx->eip;
2017 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2018 uStackFrame.pu32[2] = pCtx->eflags.u;
2019 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the stack push commit here; RSP is updated below */
2020 if (rcStrict != VINF_SUCCESS)
2021 return rcStrict;
2022
2023 /* Mark the CS selector as 'accessed'. */
2024 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2025 {
2026 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2027 if (rcStrict != VINF_SUCCESS)
2028 return rcStrict;
2029 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2030 }
2031
2032 /*
2033 * Start committing the register changes (joins with the other branch).
2034 */
2035 pCtx->rsp = uNewRsp;
2036 }
2037
2038 /* ... register committing continues. */
2039 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2040 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2041 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2042 pCtx->cs.u32Limit = cbLimitCS;
2043 pCtx->cs.u64Base = X86DESC_BASE(DescCS.Legacy);
2044 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2045
2046 pCtx->rip = uNewEip;
2047 pCtx->rflags.u &= ~fEflToClear;
2048
2049 if (fFlags & IEM_XCPT_FLAGS_CR2)
2050 pCtx->cr2 = uCr2;
2051
2052 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2053 iemRaiseXcptAdjustState(pCtx, u8Vector);
2054
2055 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2056}
2057
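/*
 * Illustrative sketch of the two 32-bit frame shapes built above, highest
 * address first (the optional error code is pushed last, i.e. lowest):
 *
 *      privilege change (stack from TSS):      same privilege (current stack):
 *          old SS, old ESP, EFLAGS, CS, EIP        EFLAGS, CS, EIP [, error code]
 *          [, error code]
 *
 * which matches the 20/24 and 12/16 byte frame sizes used in the code.
 */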
2058
2059/**
2060 * Implements exceptions and interrupts for V8086 mode.
2061 *
2062 * @returns VBox strict status code.
2063 * @param pIemCpu The IEM per CPU instance data.
2064 * @param pCtx The CPU context.
2065 * @param cbInstr The number of bytes to offset rIP by in the return
2066 * address.
2067 * @param u8Vector The interrupt / exception vector number.
2068 * @param fFlags The flags.
2069 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2070 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2071 */
2072static VBOXSTRICTRC
2073iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2074 PCPUMCTX pCtx,
2075 uint8_t cbInstr,
2076 uint8_t u8Vector,
2077 uint32_t fFlags,
2078 uint16_t uErr,
2079 uint64_t uCr2)
2080{
2081 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2082 AssertMsgFailed(("V8086 exception / interrupt dispatching\n"));
2083 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2084}
2085
2086
2087/**
2088 * Implements exceptions and interrupts for long mode.
2089 *
2090 * @returns VBox strict status code.
2091 * @param pIemCpu The IEM per CPU instance data.
2092 * @param pCtx The CPU context.
2093 * @param cbInstr The number of bytes to offset rIP by in the return
2094 * address.
2095 * @param u8Vector The interrupt / exception vector number.
2096 * @param fFlags The flags.
2097 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2098 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2099 */
2100static VBOXSTRICTRC
2101iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2102 PCPUMCTX pCtx,
2103 uint8_t cbInstr,
2104 uint8_t u8Vector,
2105 uint32_t fFlags,
2106 uint16_t uErr,
2107 uint64_t uCr2)
2108{
2109 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2110 AssertMsgFailed(("long mode exception / interrupt dispatching\n"));
2111 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2112}
2113
2114
2115/**
2116 * Implements exceptions and interrupts.
2117 *
2118 * All exceptions and interrupts go through this function!
2119 *
2120 * @returns VBox strict status code.
2121 * @param pIemCpu The IEM per CPU instance data.
2122 * @param cbInstr The number of bytes to offset rIP by in the return
2123 * address.
2124 * @param u8Vector The interrupt / exception vector number.
2125 * @param fFlags The flags.
2126 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2127 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2128 */
2129DECL_NO_INLINE(static, VBOXSTRICTRC)
2130iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2131 uint8_t cbInstr,
2132 uint8_t u8Vector,
2133 uint32_t fFlags,
2134 uint16_t uErr,
2135 uint64_t uCr2)
2136{
2137 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2138
2139 /*
2140 * Do recursion accounting.
2141 */
2142 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2143 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2144 if (pIemCpu->cXcptRecursions == 0)
2145 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2146 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2147 else
2148 {
2149 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2150 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2151
2152 /** @todo double and triple faults. */
2153 AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_IEM_ASPECT_NOT_IMPLEMENTED);
2154
2155 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2156 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2157 {
2158 ....
2159 } */
2160 }
2161 pIemCpu->cXcptRecursions++;
2162 pIemCpu->uCurXcpt = u8Vector;
2163 pIemCpu->fCurXcpt = fFlags;
2164
2165 /*
2166 * Extensive logging.
2167 */
2168#if defined(LOG_ENABLED) && defined(IN_RING3)
2169 if (LogIs3Enabled())
2170 {
2171 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2172 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2173 char szRegs[4096];
2174 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2175 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2176 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2177 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2178 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2179 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2180 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2181 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2182 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2183 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2184 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2185 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2186 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2187 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2188 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2189 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2190 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2191 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2192 " efer=%016VR{efer}\n"
2193 " pat=%016VR{pat}\n"
2194 " sf_mask=%016VR{sf_mask}\n"
2195 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2196 " lstar=%016VR{lstar}\n"
2197 " star=%016VR{star} cstar=%016VR{cstar}\n"
2198 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2199 );
2200
2201 char szInstr[256];
2202 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2203 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2204 szInstr, sizeof(szInstr), NULL);
2205 Log3(("%s%s\n", szRegs, szInstr));
2206 }
2207#endif /* LOG_ENABLED */
2208
2209 /*
2210 * Call the mode specific worker function.
2211 */
2212 VBOXSTRICTRC rcStrict;
2213 if (!(pCtx->cr0 & X86_CR0_PE))
2214 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2215 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2216 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2217 else if (!pCtx->eflags.Bits.u1VM)
2218 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2219 else
2220 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2221
2222 /*
2223 * Unwind.
2224 */
2225 pIemCpu->cXcptRecursions--;
2226 pIemCpu->uCurXcpt = uPrevXcpt;
2227 pIemCpu->fCurXcpt = fPrevXcpt;
2228 LogFlow(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv\n",
2229 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp));
2230 return rcStrict;
2231}
2232
2233
2234/** \#DE - 00. */
2235DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2236{
2237 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2238}
2239
2240
2241/** \#DB - 01. */
2242DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2243{
2244 /** @todo set/clear RF. */
2245 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2246}
2247
2248
2249/** \#UD - 06. */
2250DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2251{
2252 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2253}
2254
2255
2256/** \#NM - 07. */
2257DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2258{
2259 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2260}
2261
2262
2263#ifdef SOME_UNUSED_FUNCTION
2264/** \#TS(err) - 0a. */
2265DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2266{
2267 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2268}
2269#endif
2270
2271
2272/** \#TS(tr) - 0a. */
2273DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2274{
2275 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2276 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
2277}
2278
2279
2280/** \#NP(err) - 0b. */
2281DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2282{
2283 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2284}
2285
2286
2287/** \#NP(seg) - 0b. */
2288DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2289{
2290 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2291 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2292}
2293
2294
2295/** \#NP(sel) - 0b. */
2296DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2297{
2298 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2299 uSel & ~X86_SEL_RPL, 0);
2300}
2301
2302
2303/** \#SS(seg) - 0c. */
2304DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2305{
2306 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2307 uSel & ~X86_SEL_RPL, 0);
2308}
2309
2310
2311/** \#GP(n) - 0d. */
2312DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2313{
2314 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2315}
2316
2317
2318/** \#GP(0) - 0d. */
2319DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2320{
2321 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2322}
2323
2324
2325/** \#GP(sel) - 0d. */
2326DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2327{
2328 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2329 Sel & ~X86_SEL_RPL, 0);
2330}
2331
2332
2333/** \#GP(0) - 0d. */
2334DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2335{
2336 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2337}
2338
2339
2340/** \#GP(sel) - 0d. */
2341DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2342{
2343 NOREF(iSegReg); NOREF(fAccess);
2344 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2345 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2346}
2347
2348
2349/** \#GP(sel) - 0d. */
2350DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2351{
2352 NOREF(Sel);
2353 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2354}
2355
2356
2357/** \#GP(sel) - 0d. */
2358DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2359{
2360 NOREF(iSegReg); NOREF(fAccess);
2361 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2362}
2363
2364
2365/** \#PF(n) - 0e. */
2366DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2367{
2368 uint16_t uErr;
2369 switch (rc)
2370 {
2371 case VERR_PAGE_NOT_PRESENT:
2372 case VERR_PAGE_TABLE_NOT_PRESENT:
2373 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2374 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2375 uErr = 0;
2376 break;
2377
2378 default:
2379 AssertMsgFailed(("%Rrc\n", rc));
2380 case VERR_ACCESS_DENIED:
2381 uErr = X86_TRAP_PF_P;
2382 break;
2383
2384 /** @todo reserved */
2385 }
2386
2387 if (pIemCpu->uCpl == 3)
2388 uErr |= X86_TRAP_PF_US;
2389
2390 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2391 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2392 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2393 uErr |= X86_TRAP_PF_ID;
2394
2395 /* Note! RW access callers reporting a WRITE protection fault will clear
2396 the READ flag before calling. So, read-modify-write accesses (RW)
2397 can safely be reported as READ faults. */
2398 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2399 uErr |= X86_TRAP_PF_RW;
2400
2401 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2402 uErr, GCPtrWhere);
2403}
2404
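/*
 * Illustrative sketch of the error code assembled above for a user-mode write
 * to a present but read-only page:
 *
 *      uErr = X86_TRAP_PF_P        (translation was present)
 *           | X86_TRAP_PF_US       (uCpl == 3)
 *           | X86_TRAP_PF_RW       (write access)
 *
 * and the faulting address GCPtrWhere ends up in CR2 via IEM_XCPT_FLAGS_CR2.
 */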
2405
2406/** \#MF(0) - 10. */
2407DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2408{
2409 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2410}
2411
2412
2413/** \#AC(0) - 11. */
2414DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
2415{
2416 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2417}
2418
2419
2420/**
2421 * Macro for calling iemCImplRaiseDivideError().
2422 *
2423 * This enables us to add/remove arguments and force different levels of
2424 * inlining as we wish.
2425 *
2426 * @return Strict VBox status code.
2427 */
2428#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2429IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2430{
2431 NOREF(cbInstr);
2432 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2433}
2434
2435
2436/**
2437 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2438 *
2439 * This enables us to add/remove arguments and force different levels of
2440 * inlining as we wish.
2441 *
2442 * @return Strict VBox status code.
2443 */
2444#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2445IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2446{
2447 NOREF(cbInstr);
2448 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2449}
2450
2451
2452/**
2453 * Macro for calling iemCImplRaiseInvalidOpcode().
2454 *
2455 * This enables us to add/remove arguments and force different levels of
2456 * inlining as we wish.
2457 *
2458 * @return Strict VBox status code.
2459 */
2460#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2461IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2462{
2463 NOREF(cbInstr);
2464 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2465}
2466
2467
2468/** @} */
2469
2470
2471/*
2472 *
2473 * Helper routines.
2474 * Helper routines.
2475 * Helper routines.
2476 *
2477 */
2478
2479/**
2480 * Recalculates the effective operand size.
2481 *
2482 * @param pIemCpu The IEM state.
2483 */
2484static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2485{
2486 switch (pIemCpu->enmCpuMode)
2487 {
2488 case IEMMODE_16BIT:
2489 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2490 break;
2491 case IEMMODE_32BIT:
2492 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2493 break;
2494 case IEMMODE_64BIT:
2495 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2496 {
2497 case 0:
2498 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2499 break;
2500 case IEM_OP_PRF_SIZE_OP:
2501 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2502 break;
2503 case IEM_OP_PRF_SIZE_REX_W:
2504 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2505 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2506 break;
2507 }
2508 break;
2509 default:
2510 AssertFailed();
2511 }
2512}
2513
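/*
 * Illustrative summary of the mapping implemented above (assuming the usual
 * default operand sizes per mode):
 *
 *      mode     none   0x66    REX.W   REX.W + 0x66
 *      16-bit   16     32      -       -
 *      32-bit   32     16      -       -
 *      64-bit   def    16      64      64           (def is normally 32)
 *
 * i.e. in 64-bit mode REX.W takes precedence over the operand size prefix.
 */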
2514
2515/**
2516 * Sets the default operand size to 64-bit and recalculates the effective
2517 * operand size.
2518 *
2519 * @param pIemCpu The IEM state.
2520 */
2521static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2522{
2523 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2524 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2525 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2526 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2527 else
2528 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2529}
2530
2531
2532/*
2533 *
2534 * Common opcode decoders.
2535 * Common opcode decoders.
2536 * Common opcode decoders.
2537 *
2538 */
2539//#include <iprt/mem.h>
2540
2541/**
2542 * Used to add extra details about a stub case.
2543 * @param pIemCpu The IEM per CPU state.
2544 */
2545static void iemOpStubMsg2(PIEMCPU pIemCpu)
2546{
2547#if defined(LOG_ENABLED) && defined(IN_RING3)
2548 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2549 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2550 char szRegs[4096];
2551 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2552 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2553 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2554 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2555 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2556 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2557 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2558 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2559 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2560 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2561 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2562 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2563 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2564 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2565 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2566 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2567 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2568 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2569 " efer=%016VR{efer}\n"
2570 " pat=%016VR{pat}\n"
2571 " sf_mask=%016VR{sf_mask}\n"
2572 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2573 " lstar=%016VR{lstar}\n"
2574 " star=%016VR{star} cstar=%016VR{cstar}\n"
2575 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2576 );
2577
2578 char szInstr[256];
2579 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2580 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2581 szInstr, sizeof(szInstr), NULL);
2582
2583 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2584#else
2585 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);
2586#endif
2587}
2588
2589
2590/** Stubs an opcode. */
2591#define FNIEMOP_STUB(a_Name) \
2592 FNIEMOP_DEF(a_Name) \
2593 { \
2594 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2595 iemOpStubMsg2(pIemCpu); \
2596 RTAssertPanic(); \
2597 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2598 } \
2599 typedef int ignore_semicolon
2600
2601/** Stubs an opcode. */
2602#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2603 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2604 { \
2605 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2606 iemOpStubMsg2(pIemCpu); \
2607 RTAssertPanic(); \
2608 NOREF(a_Name0); \
2609 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2610 } \
2611 typedef int ignore_semicolon
2612
2613/** Stubs an opcode which currently should raise \#UD. */
2614#define FNIEMOP_UD_STUB(a_Name) \
2615 FNIEMOP_DEF(a_Name) \
2616 { \
2617 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2618 return IEMOP_RAISE_INVALID_OPCODE(); \
2619 } \
2620 typedef int ignore_semicolon
2621
2622/** Stubs an opcode which currently should raise \#UD. */
2623#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
2624 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2625 { \
2626 NOREF(a_Name0); \
2627 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2628 return IEMOP_RAISE_INVALID_OPCODE(); \
2629 } \
2630 typedef int ignore_semicolon
2631
2632
2633
2634/** @name Register Access.
2635 * @{
2636 */
2637
2638/**
2639 * Gets a reference (pointer) to the specified hidden segment register.
2640 *
2641 * @returns Hidden register reference.
2642 * @param pIemCpu The per CPU data.
2643 * @param iSegReg The segment register.
2644 */
2645static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2646{
2647 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2648 switch (iSegReg)
2649 {
2650 case X86_SREG_ES: return &pCtx->es;
2651 case X86_SREG_CS: return &pCtx->cs;
2652 case X86_SREG_SS: return &pCtx->ss;
2653 case X86_SREG_DS: return &pCtx->ds;
2654 case X86_SREG_FS: return &pCtx->fs;
2655 case X86_SREG_GS: return &pCtx->gs;
2656 }
2657 AssertFailedReturn(NULL);
2658}
2659
2660
2661/**
2662 * Gets a reference (pointer) to the specified segment register (the selector
2663 * value).
2664 *
2665 * @returns Pointer to the selector variable.
2666 * @param pIemCpu The per CPU data.
2667 * @param iSegReg The segment register.
2668 */
2669static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2670{
2671 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2672 switch (iSegReg)
2673 {
2674 case X86_SREG_ES: return &pCtx->es.Sel;
2675 case X86_SREG_CS: return &pCtx->cs.Sel;
2676 case X86_SREG_SS: return &pCtx->ss.Sel;
2677 case X86_SREG_DS: return &pCtx->ds.Sel;
2678 case X86_SREG_FS: return &pCtx->fs.Sel;
2679 case X86_SREG_GS: return &pCtx->gs.Sel;
2680 }
2681 AssertFailedReturn(NULL);
2682}
2683
2684
2685/**
2686 * Fetches the selector value of a segment register.
2687 *
2688 * @returns The selector value.
2689 * @param pIemCpu The per CPU data.
2690 * @param iSegReg The segment register.
2691 */
2692static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
2693{
2694 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2695 switch (iSegReg)
2696 {
2697 case X86_SREG_ES: return pCtx->es.Sel;
2698 case X86_SREG_CS: return pCtx->cs.Sel;
2699 case X86_SREG_SS: return pCtx->ss.Sel;
2700 case X86_SREG_DS: return pCtx->ds.Sel;
2701 case X86_SREG_FS: return pCtx->fs.Sel;
2702 case X86_SREG_GS: return pCtx->gs.Sel;
2703 }
2704 AssertFailedReturn(0xffff);
2705}
2706
2707
2708/**
2709 * Gets a reference (pointer) to the specified general register.
2710 *
2711 * @returns Register reference.
2712 * @param pIemCpu The per CPU data.
2713 * @param iReg The general register.
2714 */
2715static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
2716{
2717 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2718 switch (iReg)
2719 {
2720 case X86_GREG_xAX: return &pCtx->rax;
2721 case X86_GREG_xCX: return &pCtx->rcx;
2722 case X86_GREG_xDX: return &pCtx->rdx;
2723 case X86_GREG_xBX: return &pCtx->rbx;
2724 case X86_GREG_xSP: return &pCtx->rsp;
2725 case X86_GREG_xBP: return &pCtx->rbp;
2726 case X86_GREG_xSI: return &pCtx->rsi;
2727 case X86_GREG_xDI: return &pCtx->rdi;
2728 case X86_GREG_x8: return &pCtx->r8;
2729 case X86_GREG_x9: return &pCtx->r9;
2730 case X86_GREG_x10: return &pCtx->r10;
2731 case X86_GREG_x11: return &pCtx->r11;
2732 case X86_GREG_x12: return &pCtx->r12;
2733 case X86_GREG_x13: return &pCtx->r13;
2734 case X86_GREG_x14: return &pCtx->r14;
2735 case X86_GREG_x15: return &pCtx->r15;
2736 }
2737 AssertFailedReturn(NULL);
2738}
2739
2740
2741/**
2742 * Gets a reference (pointer) to the specified 8-bit general register.
2743 *
2744 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
2745 *
2746 * @returns Register reference.
2747 * @param pIemCpu The per CPU data.
2748 * @param iReg The register.
2749 */
2750static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
2751{
2752 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
2753 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
2754
2755 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
2756 if (iReg >= 4)
2757 pu8Reg++;
2758 return pu8Reg;
2759}
2760
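/*
 * Illustrative summary of the 8-bit register mapping above: without a REX
 * prefix, encodings 4-7 select the legacy high byte registers:
 *
 *      iReg 0..3  ->  AL, CL, DL, BL   (byte 0 of rAX, rCX, rDX, rBX)
 *      iReg 4..7  ->  AH, CH, DH, BH   (byte 1 of rAX, rCX, rDX, rBX)
 *
 * With any REX prefix present, 4..7 instead mean SPL, BPL, SIL and DIL, which
 * is why the plain byte 0 view from iemGRegRef can be returned directly.
 */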
2761
2762/**
2763 * Fetches the value of an 8-bit general register.
2764 *
2765 * @returns The register value.
2766 * @param pIemCpu The per CPU data.
2767 * @param iReg The register.
2768 */
2769static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
2770{
2771 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
2772 return *pbSrc;
2773}
2774
2775
2776/**
2777 * Fetches the value of a 16-bit general register.
2778 *
2779 * @returns The register value.
2780 * @param pIemCpu The per CPU data.
2781 * @param iReg The register.
2782 */
2783static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
2784{
2785 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
2786}
2787
2788
2789/**
2790 * Fetches the value of a 32-bit general register.
2791 *
2792 * @returns The register value.
2793 * @param pIemCpu The per CPU data.
2794 * @param iReg The register.
2795 */
2796static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
2797{
2798 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
2799}
2800
2801
2802/**
2803 * Fetches the value of a 64-bit general register.
2804 *
2805 * @returns The register value.
2806 * @param pIemCpu The per CPU data.
2807 * @param iReg The register.
2808 */
2809static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
2810{
2811 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
2812}
2813
2814
2815/**
2816 * Checks whether the FPU state is in FXSAVE format.
2817 *
2818 * @returns true if it is, false if it's in FNSAVE format.
2819 * @param pIemCpu The IEM per CPU data.
2820 */
2821DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
2822{
2823#ifdef RT_ARCH_AMD64
2824 NOREF(pIemCpu);
2825 return true;
2826#else
2827 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
2828 return true;
2829#endif
2830}
2831
2832
2833/**
2834 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
2835 *
2836 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2837 * segment limit.
2838 *
2839 * @param pIemCpu The per CPU data.
2840 * @param offNextInstr The offset of the next instruction.
2841 */
2842static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
2843{
2844 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2845 switch (pIemCpu->enmEffOpSize)
2846 {
2847 case IEMMODE_16BIT:
2848 {
2849 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2850 if ( uNewIp > pCtx->cs.u32Limit
2851 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2852 return iemRaiseGeneralProtectionFault0(pIemCpu);
2853 pCtx->rip = uNewIp;
2854 break;
2855 }
2856
2857 case IEMMODE_32BIT:
2858 {
2859 Assert(pCtx->rip <= UINT32_MAX);
2860 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2861
2862 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2863 if (uNewEip > pCtx->cs.u32Limit)
2864 return iemRaiseGeneralProtectionFault0(pIemCpu);
2865 pCtx->rip = uNewEip;
2866 break;
2867 }
2868
2869 case IEMMODE_64BIT:
2870 {
2871 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2872
2873 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2874 if (!IEM_IS_CANONICAL(uNewRip))
2875 return iemRaiseGeneralProtectionFault0(pIemCpu);
2876 pCtx->rip = uNewRip;
2877 break;
2878 }
2879
2880 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2881 }
2882
2883 return VINF_SUCCESS;
2884}
2885
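/*
 * Illustrative worked example for the 16-bit case above: a short backward jump
 * "EB FE" (jmp $) at IP 0x0100 decodes to offNextInstr = -2 with offOpcode = 2,
 * giving uNewIp = 0x0100 + (-2) + 2 = 0x0100, i.e. a jump to itself.  The
 * uint16_t arithmetic also provides the architectural wrap-around at 0xffff.
 */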
2886
2887/**
2888 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
2889 *
2890 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2891 * segment limit.
2892 *
2893 * @returns Strict VBox status code.
2894 * @param pIemCpu The per CPU data.
2895 * @param offNextInstr The offset of the next instruction.
2896 */
2897static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
2898{
2899 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2900 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
2901
2902 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2903 if ( uNewIp > pCtx->cs.u32Limit
2904 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2905 return iemRaiseGeneralProtectionFault0(pIemCpu);
2906 /** @todo Test 16-bit jump in 64-bit mode. */
2907 pCtx->rip = uNewIp;
2908
2909 return VINF_SUCCESS;
2910}
2911
2912
2913/**
2914 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
2915 *
2916 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2917 * segment limit.
2918 *
2919 * @returns Strict VBox status code.
2920 * @param pIemCpu The per CPU data.
2921 * @param offNextInstr The offset of the next instruction.
2922 */
2923static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
2924{
2925 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2926 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
2927
2928 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
2929 {
2930 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2931
2932 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2933 if (uNewEip > pCtx->cs.u32Limit)
2934 return iemRaiseGeneralProtectionFault0(pIemCpu);
2935 pCtx->rip = uNewEip;
2936 }
2937 else
2938 {
2939 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2940
2941 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2942 if (!IEM_IS_CANONICAL(uNewRip))
2943 return iemRaiseGeneralProtectionFault0(pIemCpu);
2944 pCtx->rip = uNewRip;
2945 }
2946 return VINF_SUCCESS;
2947}
2948
2949
2950/**
2951 * Performs a near jump to the specified address.
2952 *
2953 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2954 * segment limit.
2955 *
2956 * @param pIemCpu The per CPU data.
2957 * @param uNewRip The new RIP value.
2958 */
2959static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
2960{
2961 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2962 switch (pIemCpu->enmEffOpSize)
2963 {
2964 case IEMMODE_16BIT:
2965 {
2966 Assert(uNewRip <= UINT16_MAX);
2967 if ( uNewRip > pCtx->cs.u32Limit
2968 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2969 return iemRaiseGeneralProtectionFault0(pIemCpu);
2970 /** @todo Test 16-bit jump in 64-bit mode. */
2971 pCtx->rip = uNewRip;
2972 break;
2973 }
2974
2975 case IEMMODE_32BIT:
2976 {
2977 Assert(uNewRip <= UINT32_MAX);
2978 Assert(pCtx->rip <= UINT32_MAX);
2979 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2980
2981 if (uNewRip > pCtx->cs.u32Limit)
2982 return iemRaiseGeneralProtectionFault0(pIemCpu);
2983 pCtx->rip = uNewRip;
2984 break;
2985 }
2986
2987 case IEMMODE_64BIT:
2988 {
2989 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2990
2991 if (!IEM_IS_CANONICAL(uNewRip))
2992 return iemRaiseGeneralProtectionFault0(pIemCpu);
2993 pCtx->rip = uNewRip;
2994 break;
2995 }
2996
2997 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2998 }
2999
3000 return VINF_SUCCESS;
3001}
3002
3003
3004/**
3005 * Gets the address of the top of the stack.
3006 *
3007 * @param pCtx The CPU context whose SP/ESP/RSP should be
3008 * read.
3009 */
3010DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
3011{
3012 if (pCtx->ss.Attr.n.u1Long)
3013 return pCtx->rsp;
3014 if (pCtx->ss.Attr.n.u1DefBig)
3015 return pCtx->esp;
3016 return pCtx->sp;
3017}
3018
3019
3020/**
3021 * Updates the RIP/EIP/IP to point to the next instruction.
3022 *
3023 * @param pIemCpu The per CPU data.
3024 * @param cbInstr The number of bytes to add.
3025 */
3026static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
3027{
3028 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3029 switch (pIemCpu->enmCpuMode)
3030 {
3031 case IEMMODE_16BIT:
3032 Assert(pCtx->rip <= UINT16_MAX);
3033 pCtx->eip += cbInstr;
3034 pCtx->eip &= UINT32_C(0xffff);
3035 break;
3036
3037 case IEMMODE_32BIT:
3038 pCtx->eip += cbInstr;
3039 Assert(pCtx->rip <= UINT32_MAX);
3040 break;
3041
3042 case IEMMODE_64BIT:
3043 pCtx->rip += cbInstr;
3044 break;
3045 default: AssertFailed();
3046 }
3047}
3048
3049
3050/**
3051 * Updates the RIP/EIP/IP to point to the next instruction.
3052 *
3053 * @param pIemCpu The per CPU data.
3054 */
3055static void iemRegUpdateRip(PIEMCPU pIemCpu)
3056{
3057 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3058}
3059
3060
3061/**
3062 * Adds to the stack pointer.
3063 *
3064 * @param pCtx The CPU context whose SP/ESP/RSP should be
3065 * updated.
3066 * @param cbToAdd The number of bytes to add.
3067 */
3068DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
3069{
3070 if (pCtx->ss.Attr.n.u1Long)
3071 pCtx->rsp += cbToAdd;
3072 else if (pCtx->ss.Attr.n.u1DefBig)
3073 pCtx->esp += cbToAdd;
3074 else
3075 pCtx->sp += cbToAdd;
3076}
3077
3078
3079/**
3080 * Subtracts from the stack pointer.
3081 *
3082 * @param pCtx The CPU context whose SP/ESP/RSP should be
3083 * updated.
3084 * @param cbToSub The number of bytes to subtract.
3085 */
3086DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
3087{
3088 if (pCtx->ss.Attr.n.u1Long)
3089 pCtx->rsp -= cbToSub;
3090 else if (pCtx->ss.Attr.n.u1DefBig)
3091 pCtx->esp -= cbToSub;
3092 else
3093 pCtx->sp -= cbToSub;
3094}
3095
3096
3097/**
3098 * Adds to the temporary stack pointer.
3099 *
3100 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3101 * @param cbToAdd The number of bytes to add.
3102 * @param pCtx Where to get the current stack mode.
3103 */
3104DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)
3105{
3106 if (pCtx->ss.Attr.n.u1Long)
3107 pTmpRsp->u += cbToAdd;
3108 else if (pCtx->ss.Attr.n.u1DefBig)
3109 pTmpRsp->DWords.dw0 += cbToAdd;
3110 else
3111 pTmpRsp->Words.w0 += cbToAdd;
3112}
3113
3114
3115/**
3116 * Subtracts from the temporary stack pointer.
3117 *
3118 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3119 * @param cbToSub The number of bytes to subtract.
3120 * @param pCtx Where to get the current stack mode.
3121 */
3122DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)
3123{
3124 if (pCtx->ss.Attr.n.u1Long)
3125 pTmpRsp->u -= cbToSub;
3126 else if (pCtx->ss.Attr.n.u1DefBig)
3127 pTmpRsp->DWords.dw0 -= cbToSub;
3128 else
3129 pTmpRsp->Words.w0 -= cbToSub;
3130}
3131
3132
3133/**
3134 * Calculates the effective stack address for a push of the specified size as
3135 * well as the new RSP value (upper bits may be masked).
3136 *
3137 * @returns Effective stack address for the push.
3138 * @param pCtx Where to get the current stack mode.
3139 * @param cbItem The size of the stack item to push.
3140 * @param puNewRsp Where to return the new RSP value.
3141 */
3142DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3143{
3144 RTUINT64U uTmpRsp;
3145 RTGCPTR GCPtrTop;
3146 uTmpRsp.u = pCtx->rsp;
3147
3148 if (pCtx->ss.Attr.n.u1Long)
3149 GCPtrTop = uTmpRsp.u -= cbItem;
3150 else if (pCtx->ss.Attr.n.u1DefBig)
3151 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3152 else
3153 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3154 *puNewRsp = uTmpRsp.u;
3155 return GCPtrTop;
3156}
3157
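/*
 * Illustrative worked example for the function above with a 32-bit stack
 * segment (ss.Attr.n.u1DefBig set) and a 4 byte push:
 *
 *      rsp        = 0x0000700000010000
 *      new ESP    = 0x00010000 - 4 = 0x0000fffc    (only the low dword moves)
 *      GCPtrTop   = 0x0000fffc                     (SS-relative store address)
 *      *puNewRsp  = 0x000070000000fffc             (upper half preserved)
 *
 * In long mode the full 64-bit RSP is used; with a 16-bit stack only the low
 * word wraps.
 */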
3158
3159/**
3160 * Gets the current stack pointer and calculates the value after a pop of the
3161 * specified size.
3162 *
3163 * @returns Current stack pointer.
3164 * @param pCtx Where to get the current stack mode.
3165 * @param cbItem The size of the stack item to pop.
3166 * @param puNewRsp Where to return the new RSP value.
3167 */
3168DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3169{
3170 RTUINT64U uTmpRsp;
3171 RTGCPTR GCPtrTop;
3172 uTmpRsp.u = pCtx->rsp;
3173
3174 if (pCtx->ss.Attr.n.u1Long)
3175 {
3176 GCPtrTop = uTmpRsp.u;
3177 uTmpRsp.u += cbItem;
3178 }
3179 else if (pCtx->ss.Attr.n.u1DefBig)
3180 {
3181 GCPtrTop = uTmpRsp.DWords.dw0;
3182 uTmpRsp.DWords.dw0 += cbItem;
3183 }
3184 else
3185 {
3186 GCPtrTop = uTmpRsp.Words.w0;
3187 uTmpRsp.Words.w0 += cbItem;
3188 }
3189 *puNewRsp = uTmpRsp.u;
3190 return GCPtrTop;
3191}
3192
3193
3194/**
3195 * Calculates the effective stack address for a push of the specified size as
3196 * well as the new temporary RSP value (upper bits may be masked).
3197 *
3198 * @returns Effective stack address for the push.
3199 * @param pTmpRsp The temporary stack pointer. This is updated.
3200 * @param cbItem The size of the stack item to push.
3201 * @param pCtx Where to get the current stack mode.
3202 */
3203DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3204{
3205 RTGCPTR GCPtrTop;
3206
3207 if (pCtx->ss.Attr.n.u1Long)
3208 GCPtrTop = pTmpRsp->u -= cbItem;
3209 else if (pCtx->ss.Attr.n.u1DefBig)
3210 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3211 else
3212 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3213 return GCPtrTop;
3214}
3215
3216
3217/**
3218 * Gets the effective stack address for a pop of the specified size and
3219 * calculates and updates the temporary RSP.
3220 *
3221 * @returns Current stack pointer.
3222 * @param pTmpRsp The temporary stack pointer. This is updated.
3223 * @param pCtx Where to get the current stack mode.
3224 * @param cbItem The size of the stack item to pop.
3225 */
3226DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3227{
3228 RTGCPTR GCPtrTop;
3229 if (pCtx->ss.Attr.n.u1Long)
3230 {
3231 GCPtrTop = pTmpRsp->u;
3232 pTmpRsp->u += cbItem;
3233 }
3234 else if (pCtx->ss.Attr.n.u1DefBig)
3235 {
3236 GCPtrTop = pTmpRsp->DWords.dw0;
3237 pTmpRsp->DWords.dw0 += cbItem;
3238 }
3239 else
3240 {
3241 GCPtrTop = pTmpRsp->Words.w0;
3242 pTmpRsp->Words.w0 += cbItem;
3243 }
3244 return GCPtrTop;
3245}
3246
3247
3248/**
3249 * Checks if an Intel CPUID feature bit is set.
3250 *
3251 * @returns true / false.
3252 *
3253 * @param pIemCpu The IEM per CPU data.
3254 * @param fEdx The EDX bit to test, or 0 if only testing ECX.
3255 * @param fEcx The ECX bit to test, or 0 if only testing EDX.
3256 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3257 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3258 */
3259static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3260{
3261 uint32_t uEax, uEbx, uEcx, uEdx;
3262 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3263 return (fEcx && (uEcx & fEcx))
3264 || (fEdx && (uEdx & fEdx));
3265}
3266
3267
3268/**
3269 * Checks if an AMD CPUID feature bit is set.
3270 *
3271 * @returns true / false.
3272 *
3273 * @param pIemCpu The IEM per CPU data.
3274 * @param fEdx The EDX bit to test, or 0 if only testing ECX.
3275 * @param fEcx The ECX bit to test, or 0 if only testing EDX.
3276 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3277 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3278 */
3279static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3280{
3281 uint32_t uEax, uEbx, uEcx, uEdx;
3282 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3283 return (fEcx && (uEcx & fEcx))
3284 || (fEdx && (uEdx & fEdx));
3285}
3286
3287/** @} */
3288
3289
3290/** @name FPU access and helpers.
3291 *
3292 * @{
3293 */
3294
3295
3296/**
3297 * Hook for preparing to use the host FPU.
3298 *
3299 * This is necessary in ring-0 and raw-mode context.
3300 *
3301 * @param pIemCpu The IEM per CPU data.
3302 */
3303DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3304{
3305#ifdef IN_RING3
3306 NOREF(pIemCpu);
3307#else
3308/** @todo RZ: FIXME */
3309//# error "Implement me"
3310#endif
3311}
3312
3313
3314/**
3315 * Stores a QNaN value into a FPU register.
3316 *
3317 * @param pReg Pointer to the register.
3318 */
3319DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3320{
3321 pReg->au32[0] = UINT32_C(0x00000000);
3322 pReg->au32[1] = UINT32_C(0xc0000000);
3323 pReg->au16[4] = UINT16_C(0xffff);
3324}
3325
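/*
 * Illustrative breakdown of the pattern stored above - the "real indefinite"
 * QNaN in 80-bit extended precision:
 *
 *      au16[4] = 0xffff       -> sign = 1, exponent = 0x7fff (all ones)
 *      au32[1] = 0xc0000000   -> integer bit = 1, top fraction bit = 1 (quiet)
 *      au32[0] = 0x00000000   -> remaining fraction bits
 */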
3326
3327/**
3328 * Updates the FOP, FPU.CS and FPUIP registers.
3329 *
3330 * @param pIemCpu The IEM per CPU data.
3331 * @param pCtx The CPU context.
3332 */
3333DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3334{
3335 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3336 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3337 /** @todo FPU.CS and FPUIP need to be kept separately. */
3338 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3339 {
3340 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
3341 * happens in real mode here based on the fnsave and fnstenv images. */
3342 pCtx->fpu.CS = 0;
3343 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
3344 }
3345 else
3346 {
3347 pCtx->fpu.CS = pCtx->cs.Sel;
3348 pCtx->fpu.FPUIP = pCtx->rip;
3349 }
3350}
3351
3352
3353/**
3354 * Updates the FPU.DS and FPUDP registers.
3355 *
3356 * @param pIemCpu The IEM per CPU data.
3357 * @param pCtx The CPU context.
3358 * @param iEffSeg The effective segment register.
3359 * @param GCPtrEff The effective address relative to @a iEffSeg.
3360 */
3361DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3362{
3363 RTSEL sel;
3364 switch (iEffSeg)
3365 {
3366 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
3367 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
3368 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
3369 case X86_SREG_ES: sel = pCtx->es.Sel; break;
3370 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
3371 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
3372 default:
3373 AssertMsgFailed(("%d\n", iEffSeg));
3374 sel = pCtx->ds.Sel;
3375 }
3376 /** @todo FPU.DS and FPUDP need to be kept separately. */
3377 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3378 {
3379 pCtx->fpu.DS = 0;
3380 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
3381 }
3382 else
3383 {
3384 pCtx->fpu.DS = sel;
3385 pCtx->fpu.FPUDP = GCPtrEff;
3386 }
3387}
3388
3389
3390/**
3391 * Rotates the stack registers in the push direction.
3392 *
3393 * @param pCtx The CPU context.
3394 * @remarks This is a complete waste of time, but fxsave stores the registers in
3395 * stack order.
3396 */
3397DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
3398{
3399 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
3400 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
3401 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
3402 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
3403 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
3404 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
3405 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
3406 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
3407 pCtx->fpu.aRegs[0].r80 = r80Tmp;
3408}
3409
3410
3411/**
3412 * Rotates the stack registers in the pop direction.
3413 *
3414 * @param pCtx The CPU context.
3415 * @remarks This is a complete waste of time, but fxsave stores the registers in
3416 * stack order.
3417 */
3418DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
3419{
3420 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
3421 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
3422 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
3423 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
3424 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
3425 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
3426 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
3427 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
3428 pCtx->fpu.aRegs[7].r80 = r80Tmp;
3429}
3430
3431
3432/**
3433 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
3434 * exception prevents it.
3435 *
3436 * @param pIemCpu The IEM per CPU data.
3437 * @param pResult The FPU operation result to push.
3438 * @param pCtx The CPU context.
3439 */
3440static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
3441{
3442 /* Update FSW and bail if there are pending exceptions afterwards. */
3443 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3444 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3445 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3446 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3447 {
3448 pCtx->fpu.FSW = fFsw;
3449 return;
3450 }
3451
3452 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3453 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3454 {
3455 /* All is fine, push the actual value. */
3456 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3457 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3458 }
3459 else if (pCtx->fpu.FCW & X86_FCW_IM)
3460 {
3461 /* Masked stack overflow, push QNaN. */
3462 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3463 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3464 }
3465 else
3466 {
3467 /* Raise stack overflow, don't push anything. */
3468 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3469 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3470 return;
3471 }
3472
3473 fFsw &= ~X86_FSW_TOP_MASK;
3474 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3475 pCtx->fpu.FSW = fFsw;
3476
3477 iemFpuRotateStackPush(pCtx);
3478}
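/* Note on the TOP arithmetic above: adding 7 modulo 8 decrements TOP, which is
 * what a push does; e.g. TOP = 0 becomes (0 + 7) & 7 = 7, wrapping from register
 * 0 down to register 7 just like the hardware. */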
3479
3480
3481/**
3482 * Stores a result in a FPU register and updates the FSW and FTW.
3483 *
3484 * @param pIemCpu The IEM per CPU data.
3485 * @param pResult The result to store.
3486 * @param iStReg Which FPU register to store it in.
3487 * @param pCtx The CPU context.
3488 */
3489static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
3490{
3491 Assert(iStReg < 8);
3492 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3493 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3494 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
3495 pCtx->fpu.FTW |= RT_BIT(iReg);
3496 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
3497}
3498
3499
3500/**
3501 * Only updates the FPU status word (FSW) with the result of the current
3502 * instruction.
3503 *
3504 * @param pCtx The CPU context.
3505 * @param u16FSW The FSW output of the current instruction.
3506 */
3507static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
3508{
3509 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3510 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
3511}
3512
3513
3514/**
3515 * Pops one item off the FPU stack if no pending exception prevents it.
3516 *
3517 * @param pCtx The CPU context.
3518 */
3519static void iemFpuMaybePopOne(PCPUMCTX pCtx)
3520{
3521 /* Check pending exceptions. */
3522 uint16_t uFSW = pCtx->fpu.FSW;
3523 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3524 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3525 return;
3526
3527 /* TOP--. */
3528 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
3529 uFSW &= ~X86_FSW_TOP_MASK;
3530 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3531 pCtx->fpu.FSW = uFSW;
3532
3533 /* Mark the previous ST0 as empty. */
3534 iOldTop >>= X86_FSW_TOP_SHIFT;
3535 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
3536
3537 /* Rotate the registers. */
3538 iemFpuRotateStackPop(pCtx);
3539}
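/* Similarly, the +9 above is +1 modulo 8, i.e. TOP is incremented for the pop;
 * TOP = 7 wraps back around to 0. */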
3540
3541
3542/**
3543 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
3544 *
3545 * @param pIemCpu The IEM per CPU data.
3546 * @param pResult The FPU operation result to push.
3547 */
3548static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
3549{
3550 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3551 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3552 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3553}
3554
3555
3556/**
3557 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
3558 * and sets FPUDP and FPUDS.
3559 *
3560 * @param pIemCpu The IEM per CPU data.
3561 * @param pResult The FPU operation result to push.
3562 * @param iEffSeg The effective segment register.
3563 * @param GCPtrEff The effective address relative to @a iEffSeg.
3564 */
3565static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3566{
3567 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3568 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3569 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3570 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3571}
3572
3573
3574/**
3575 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
3576 * unless a pending exception prevents it.
3577 *
3578 * @param pIemCpu The IEM per CPU data.
3579 * @param pResult The FPU operation result to store and push.
3580 */
3581static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
3582{
3583 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3584 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3585
3586 /* Update FSW and bail if there are pending exceptions afterwards. */
3587 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3588 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3589 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3590 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3591 {
3592 pCtx->fpu.FSW = fFsw;
3593 return;
3594 }
3595
3596 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3597 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3598 {
3599 /* All is fine, push the actual value. */
3600 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3601 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
3602 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
3603 }
3604 else if (pCtx->fpu.FCW & X86_FCW_IM)
3605 {
3606 /* Masked stack overflow, push QNaN. */
3607 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3608 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
3609 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3610 }
3611 else
3612 {
3613 /* Raise stack overflow, don't push anything. */
3614 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3615 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3616 return;
3617 }
3618
3619 fFsw &= ~X86_FSW_TOP_MASK;
3620 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3621 pCtx->fpu.FSW = fFsw;
3622
3623 iemFpuRotateStackPush(pCtx);
3624}
3625
3626
3627/**
3628 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3629 * FOP.
3630 *
3631 * @param pIemCpu The IEM per CPU data.
3632 * @param pResult The result to store.
3633 * @param iStReg Which FPU register to store it in.
3635 */
3636static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3637{
3638 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3639 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3640 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3641}
3642
3643
3644/**
3645 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3646 * FOP, and then pops the stack.
3647 *
3648 * @param pIemCpu The IEM per CPU data.
3649 * @param pResult The result to store.
3650 * @param iStReg Which FPU register to store it in.
3652 */
3653static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3654{
3655 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3656 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3657 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3658 iemFpuMaybePopOne(pCtx);
3659}
3660
3661
3662/**
3663 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3664 * FPUDP, and FPUDS.
3665 *
3666 * @param pIemCpu The IEM per CPU data.
3667 * @param pResult The result to store.
3668 * @param iStReg Which FPU register to store it in.
3670 * @param iEffSeg The effective memory operand selector register.
3671 * @param GCPtrEff The effective memory operand offset.
3672 */
3673static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3674{
3675 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3676 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3677 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3678 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3679}
3680
3681
3682/**
3683 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3684 * FPUDP, and FPUDS, and then pops the stack.
3685 *
3686 * @param pIemCpu The IEM per CPU data.
3687 * @param pResult The result to store.
3688 * @param iStReg Which FPU register to store it in.
3690 * @param iEffSeg The effective memory operand selector register.
3691 * @param GCPtrEff The effective memory operand offset.
3692 */
3693static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
3694 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3695{
3696 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3697 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3698 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3699 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3700 iemFpuMaybePopOne(pCtx);
3701}
3702
3703
3704/**
3705 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
3706 *
3707 * @param pIemCpu The IEM per CPU data.
3708 */
3709static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
3710{
3711 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
3712}
3713
3714
3715/**
3716 * Marks the specified stack register as free (for FFREE).
3717 *
3718 * @param pIemCpu The IEM per CPU data.
3719 * @param iStReg The register to free.
3720 */
3721static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
3722{
3723 Assert(iStReg < 8);
3724 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3725 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3726 pCtx->fpu.FTW &= ~RT_BIT(iReg);
3727}
3728
3729
3730/**
3731 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
3732 *
3733 * @param pIemCpu The IEM per CPU data.
3734 */
3735static void iemFpuStackIncTop(PIEMCPU pIemCpu)
3736{
3737 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3738 uint16_t uFsw = pCtx->fpu.FSW;
3739 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
3740 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3741 uFsw &= ~X86_FSW_TOP_MASK;
3742 uFsw |= uTop;
3743 pCtx->fpu.FSW = uFsw;
3744}
3745
3746
3747/**
3748 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
3749 *
3750 * @param pIemCpu The IEM per CPU data.
3751 */
3752static void iemFpuStackDecTop(PIEMCPU pIemCpu)
3753{
3754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3755 uint16_t uFsw = pCtx->fpu.FSW;
3756 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
3757 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3758 uFsw &= ~X86_FSW_TOP_MASK;
3759 uFsw |= uTop;
3760 pCtx->fpu.FSW = uFsw;
3761}
3762
3763
3764/**
3765 * Updates the FSW, FOP, FPUIP, and FPUCS.
3766 *
3767 * @param pIemCpu The IEM per CPU data.
3768 * @param u16FSW The FSW from the current instruction.
3769 */
3770static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
3771{
3772 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3773 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3774 iemFpuUpdateFSWOnly(pCtx, u16FSW);
3775}
3776
3777
3778/**
3779 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
3780 *
3781 * @param pIemCpu The IEM per CPU data.
3782 * @param u16FSW The FSW from the current instruction.
3783 */
3784static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
3785{
3786 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3787 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3788 iemFpuUpdateFSWOnly(pCtx, u16FSW);
3789 iemFpuMaybePopOne(pCtx);
3790}
3791
3792
3793/**
3794 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
3795 *
3796 * @param pIemCpu The IEM per CPU data.
3797 * @param u16FSW The FSW from the current instruction.
3798 * @param iEffSeg The effective memory operand selector register.
3799 * @param GCPtrEff The effective memory operand offset.
3800 */
3801static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3802{
3803 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3804 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3805 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3806 iemFpuUpdateFSWOnly(pCtx, u16FSW);
3807}
3808
3809
3810/**
3811 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
3812 *
3813 * @param pIemCpu The IEM per CPU data.
3814 * @param u16FSW The FSW from the current instruction.
3815 */
3816static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
3817{
3818 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3819 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3820 iemFpuUpdateFSWOnly(pCtx, u16FSW);
3821 iemFpuMaybePopOne(pCtx);
3822 iemFpuMaybePopOne(pCtx);
3823}
3824
3825
3826/**
3827 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
3828 *
3829 * @param pIemCpu The IEM per CPU data.
3830 * @param u16FSW The FSW from the current instruction.
3831 * @param iEffSeg The effective memory operand selector register.
3832 * @param GCPtrEff The effective memory operand offset.
3833 */
3834static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3835{
3836 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3837 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3838 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3839 iemFpuUpdateFSWOnly(pCtx, u16FSW);
3840 iemFpuMaybePopOne(pCtx);
3841}
3842
3843
3844/**
3845 * Worker routine for raising an FPU stack underflow exception.
3846 *
3847 * @param pIemCpu The IEM per CPU data.
3848 * @param iStReg The stack register being accessed.
3849 * @param pCtx The CPU context.
3850 */
3851static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
3852{
3853 Assert(iStReg < 8 || iStReg == UINT8_MAX);
3854 if (pCtx->fpu.FCW & X86_FCW_IM)
3855 {
3856 /* Masked underflow. */
3857 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3858 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
3859 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3860 if (iStReg != UINT8_MAX)
3861 {
3862 pCtx->fpu.FTW |= RT_BIT(iReg);
3863 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
3864 }
3865 }
3866 else
3867 {
3868 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3869 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3870 }
3871}
3872
3873
3874/**
3875 * Raises a FPU stack underflow exception.
3876 *
3877 * @param pIemCpu The IEM per CPU data.
3878 * @param iStReg The destination register that should be loaded
3879 * with QNaN if \#IS is masked. Specify
3880 * UINT8_MAX if none (like for fcom).
3881 */
3882DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
3883{
3884 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3885 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3886 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
3887}
3888
3889
3890DECL_NO_INLINE(static, void)
3891iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3892{
3893 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3894 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3895 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3896 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
3897}
3898
3899
3900DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
3901{
3902 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3903 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3904 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
3905 iemFpuMaybePopOne(pCtx);
3906}
3907
3908
3909DECL_NO_INLINE(static, void)
3910iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3911{
3912 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3913 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3914 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3915 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
3916 iemFpuMaybePopOne(pCtx);
3917}
3918
3919
3920DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
3921{
3922 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3923 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3924 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
3925 iemFpuMaybePopOne(pCtx);
3926 iemFpuMaybePopOne(pCtx);
3927}
3928
3929
3930DECL_NO_INLINE(static, void)
3931iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
3932{
3933 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3934 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3935
3936 if (pCtx->fpu.FCW & X86_FCW_IM)
3937 {
3938 /* Masked underflow - Push QNaN. */
3939 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3940 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
3941 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
3942 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
3943 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3944 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3945 iemFpuRotateStackPush(pCtx);
3946 }
3947 else
3948 {
3949 /* Exception pending - don't change TOP or the register stack. */
3950 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3951 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3952 }
3953}
3954
3955
3956DECL_NO_INLINE(static, void)
3957iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
3958{
3959 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3960 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3961
3962 if (pCtx->fpu.FCW & X86_FCW_IM)
3963 {
3964 /* Masked underflow - Push QNaN. */
3965 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3966 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
3967 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
3968 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
3969 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3970 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
3971 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3972 iemFpuRotateStackPush(pCtx);
3973 }
3974 else
3975 {
3976 /* Exception pending - don't change TOP or the register stack. */
3977 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3978 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3979 }
3980}
3981
3982
3983/**
3984 * Worker routine for raising an FPU stack overflow exception on a push.
3985 *
3986 * @param pIemCpu The IEM per CPU data.
3987 * @param pCtx The CPU context.
3988 */
3989static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3990{
3991 if (pCtx->fpu.FCW & X86_FCW_IM)
3992 {
3993 /* Masked overflow. */
3994 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3995 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
3996 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
3997 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
3998 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3999 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4000 iemFpuRotateStackPush(pCtx);
4001 }
4002 else
4003 {
4004 /* Exception pending - don't change TOP or the register stack. */
4005 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4006 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4007 }
4008}
4009
4010
4011/**
4012 * Raises a FPU stack overflow exception on a push.
4013 *
4014 * @param pIemCpu The IEM per CPU data.
4015 */
4016DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4017{
4018 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4019 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4020 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4021}
4022
4023
4024/**
4025 * Raises a FPU stack overflow exception on a push with a memory operand.
4026 *
4027 * @param pIemCpu The IEM per CPU data.
4028 * @param iEffSeg The effective memory operand selector register.
4029 * @param GCPtrEff The effective memory operand offset.
4030 */
4031DECL_NO_INLINE(static, void)
4032iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4033{
4034 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4035 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4036 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4037 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4038}
4039
4040
4041static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4042{
4043 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4044 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4045 if (pCtx->fpu.FTW & RT_BIT(iReg))
4046 return VINF_SUCCESS;
4047 return VERR_NOT_FOUND;
4048}
4049
4050
4051static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4052{
4053 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4054 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4055 if (pCtx->fpu.FTW & RT_BIT(iReg))
4056 {
4057 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4058 return VINF_SUCCESS;
4059 }
4060 return VERR_NOT_FOUND;
4061}
4062
4063
4064static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4065 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
4066{
4067 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4068 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4069 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4070 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4071 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4072 {
4073 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4074 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
4075 return VINF_SUCCESS;
4076 }
4077 return VERR_NOT_FOUND;
4078}
4079
4080
4081static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
4082{
4083 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4084 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4085 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4086 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4087 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4088 {
4089 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4090 return VINF_SUCCESS;
4091 }
4092 return VERR_NOT_FOUND;
4093}
4094
4095
4096/**
4097 * Updates the FPU exception status after FCW is changed.
4098 *
4099 * @param pCtx The CPU context.
4100 */
4101static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
4102{
4103 uint16_t u16Fsw = pCtx->fpu.FSW;
4104 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
4105 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4106 else
4107 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4108 pCtx->fpu.FSW = u16Fsw;
4109}
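/* Example: FSW.IE set while FCW.IM is clear means an unmasked invalid-operation
 * exception is pending, so the code above sets ES (error summary) and B (busy);
 * masking the exception again (setting FCW.IM) makes the same call clear them. */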
4110
4111
4112/**
4113 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4114 *
4115 * @returns The full FTW.
4116 * @param pCtx The CPU state.
4117 */
4118static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
4119{
4120 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
4121 uint16_t u16Ftw = 0;
4122 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4123 for (unsigned iSt = 0; iSt < 8; iSt++)
4124 {
4125 unsigned const iReg = (iSt + iTop) & 7;
4126 if (!(u8Ftw & RT_BIT(iReg)))
4127 u16Ftw |= 3 << (iReg * 2); /* empty */
4128 else
4129 {
4130 uint16_t uTag;
4131 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
4132 if (pr80Reg->s.uExponent == 0x7fff)
4133 uTag = 2; /* Exponent is all 1's => Special. */
4134 else if (pr80Reg->s.uExponent == 0x0000)
4135 {
4136 if (pr80Reg->s.u64Mantissa == 0x0000)
4137 uTag = 1; /* All bits are zero => Zero. */
4138 else
4139 uTag = 2; /* Must be special. */
4140 }
4141 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
4142 uTag = 0; /* Valid. */
4143 else
4144 uTag = 2; /* Must be special. */
4145
4146 u16Ftw |= uTag << (iReg * 2);
4147 }
4148 }
4149
4150 return u16Ftw;
4151}
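/* The two-bit tags assembled above follow the FNSTENV/FNSAVE tag word encoding:
 * 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal, unnormal),
 * 11 = empty. An entirely empty stack therefore yields a full FTW of 0xffff. */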
4152
4153
4154/**
4155 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4156 *
4157 * @returns The compressed FTW.
4158 * @param u16FullFtw The full FTW to convert.
4159 */
4160static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
4161{
4162 uint8_t u8Ftw = 0;
4163 for (unsigned i = 0; i < 8; i++)
4164 {
4165 if ((u16FullFtw & 3) != 3 /*empty*/)
4166 u8Ftw |= RT_BIT(i);
4167 u16FullFtw >>= 2;
4168 }
4169
4170 return u8Ftw;
4171}
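/* Worked example of the compression above: a full FTW of 0xffff (all slots empty)
 * compresses to 0x00, while 0xfffc (only register 0 holding a valid value) gives
 * 0x01 - one bit per register, set whenever the two-bit tag is not 11 (empty). */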
4172
4173/** @} */
4174
4175
4176/** @name Memory access.
4177 *
4178 * @{
4179 */
4180
4181
4182/**
4183 * Checks if the given segment can be written to, raising the appropriate
4184 * exception if not.
4185 *
4186 * @returns VBox strict status code.
4187 *
4188 * @param pIemCpu The IEM per CPU data.
4189 * @param pHid Pointer to the hidden register.
4190 * @param iSegReg The register number.
4191 */
4192static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4193{
4194 if (!pHid->Attr.n.u1Present)
4195 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4196
4197 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4198 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4199 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4200 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
4201
4202 /** @todo DPL/RPL/CPL? */
4203
4204 return VINF_SUCCESS;
4205}
4206
4207
4208/**
4209 * Checks if the given segment can be read from, raising the appropriate
4210 * exception if not.
4211 *
4212 * @returns VBox strict status code.
4213 *
4214 * @param pIemCpu The IEM per CPU data.
4215 * @param pHid Pointer to the hidden register.
4216 * @param iSegReg The register number.
4217 */
4218static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4219{
4220 if (!pHid->Attr.n.u1Present)
4221 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4222
4223 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
4224 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4225 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
4226
4227 /** @todo DPL/RPL/CPL? */
4228
4229 return VINF_SUCCESS;
4230}
4231
4232
4233/**
4234 * Applies the segment limit, base and attributes.
4235 *
4236 * This may raise a \#GP or \#SS.
4237 *
4238 * @returns VBox strict status code.
4239 *
4240 * @param pIemCpu The IEM per CPU data.
4241 * @param fAccess The kind of access which is being performed.
4242 * @param iSegReg The index of the segment register to apply.
4243 * This is UINT8_MAX if none (for IDT, GDT, LDT,
4244 * TSS, ++).
4245 * @param pGCPtrMem Pointer to the guest memory address to apply
4246 * segmentation to. Input and output parameter.
4247 */
4248static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
4249 size_t cbMem, PRTGCPTR pGCPtrMem)
4250{
4251 if (iSegReg == UINT8_MAX)
4252 return VINF_SUCCESS;
4253
4254 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
4255 switch (pIemCpu->enmCpuMode)
4256 {
4257 case IEMMODE_16BIT:
4258 case IEMMODE_32BIT:
4259 {
4260 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
4261 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
4262
4263 Assert(pSel->Attr.n.u1Present);
4264 Assert(pSel->Attr.n.u1DescType);
4265 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
4266 {
4267 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4268 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4269 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4270
4271 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4272 {
4273 /** @todo CPL check. */
4274 }
4275
4276 /*
4277 * There are two kinds of data selectors, normal and expand down.
4278 */
4279 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
4280 {
4281 if ( GCPtrFirst32 > pSel->u32Limit
4282 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4283 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4284
4285 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4286 }
4287 else
4288 {
4289 /** @todo implement expand down segments. */
4290 AssertFailed(/** @todo implement this */);
4291 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
4292 }
4293 }
4294 else
4295 {
4296
4297 /*
4298 * Code selectors can usually be used to read through; writing is
4299 * only permitted in real and V8086 mode.
4300 */
4301 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4302 || ( (fAccess & IEM_ACCESS_TYPE_READ)
4303 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
4304 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
4305 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4306
4307 if ( GCPtrFirst32 > pSel->u32Limit
4308 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4309 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4310
4311 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4312 {
4313 /** @todo CPL check. */
4314 }
4315
4316 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4317 }
4318 return VINF_SUCCESS;
4319 }
4320
4321 case IEMMODE_64BIT:
4322 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
4323 *pGCPtrMem += pSel->u64Base;
4324 return VINF_SUCCESS;
4325
4326 default:
4327 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
4328 }
4329}
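/* Worked example of the 16/32-bit path above: with an expand-up data segment where
 * DS.base = 0x10000 and DS.limit = 0xffff, a 2-byte read at GCPtrMem = 0x10 passes
 * the limit check (first = 0x10, last = 0x11) and returns *pGCPtrMem = 0x10010;
 * an access with last > limit would instead fail via iemRaiseSelectorBounds. */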
4330
4331
4332/**
4333 * Translates a virtual address to a physical address and checks if we
4334 * can access the page as specified.
4335 *
4336 * @param pIemCpu The IEM per CPU data.
4337 * @param GCPtrMem The virtual address.
4338 * @param fAccess The intended access.
4339 * @param pGCPhysMem Where to return the physical address.
4340 */
4341static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
4342 PRTGCPHYS pGCPhysMem)
4343{
4344 /** @todo Need a different PGM interface here. We're currently using
4345 * generic / REM interfaces. This won't cut it for R0 & RC. */
4346 RTGCPHYS GCPhys;
4347 uint64_t fFlags;
4348 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
4349 if (RT_FAILURE(rc))
4350 {
4351 /** @todo Check unassigned memory in unpaged mode. */
4352 /** @todo Reserved bits in page tables. Requires new PGM interface. */
4353 *pGCPhysMem = NIL_RTGCPHYS;
4354 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
4355 }
4356
4357 /* If the page is writable and does not have the no-exec bit set, all
4358 access is allowed. Otherwise we'll have to check more carefully... */
4359 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
4360 {
4361 /* Write to read only memory? */
4362 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4363 && !(fFlags & X86_PTE_RW)
4364 && ( pIemCpu->uCpl != 0
4365 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
4366 {
4367 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
4368 *pGCPhysMem = NIL_RTGCPHYS;
4369 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
4370 }
4371
4372 /* Kernel memory accessed by userland? */
4373 if ( !(fFlags & X86_PTE_US)
4374 && pIemCpu->uCpl == 3
4375 && !(fAccess & IEM_ACCESS_WHAT_SYS))
4376 {
4377 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
4378 *pGCPhysMem = NIL_RTGCPHYS;
4379 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
4380 }
4381
4382 /* Executing non-executable memory? */
4383 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
4384 && (fFlags & X86_PTE_PAE_NX)
4385 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
4386 {
4387 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
4388 *pGCPhysMem = NIL_RTGCPHYS;
4389 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
4390 VERR_ACCESS_DENIED);
4391 }
4392 }
4393
4394 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
4395 *pGCPhysMem = GCPhys;
4396 return VINF_SUCCESS;
4397}
4398
4399
4400
4401/**
4402 * Maps a physical page.
4403 *
4404 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
4405 * @param pIemCpu The IEM per CPU data.
4406 * @param GCPhysMem The physical address.
4407 * @param fAccess The intended access.
4408 * @param ppvMem Where to return the mapping address.
4409 */
4410static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
4411{
4412#ifdef IEM_VERIFICATION_MODE
4413 /* Force the alternative path so we can ignore writes. */
4414 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
4415 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4416#endif
4417
4418 /*
4419 * If we can map the page without trouble, do a block processing
4420 * until the end of the current page.
4421 */
4422 /** @todo need some better API. */
4423#ifdef IN_RING3
4424 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
4425 GCPhysMem,
4426 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
4427 ppvMem);
4428#else
4429//# error "Implement me"
4430 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4431 return PGMPhysGCPhys2CCPtr(IEMCPU_TO_VM(pIemCpu),
4432 GCPhysMem,
4433 ppvMem,
4434 /** @todo pLock */ NULL);
4435 return PGMPhysGCPhys2CCPtrReadOnly(IEMCPU_TO_VM(pIemCpu),
4436 GCPhysMem,
4437 (void const **)ppvMem,
4438 /** @todo pLock */ NULL);
4439#endif
4440}
4441
4442
4443/**
4444 * Unmaps a page previously mapped by iemMemPageMap.
4445 *
4446 * This is currently a dummy function.
4447 *
4448 * @param pIemCpu The IEM per CPU data.
4449 * @param GCPhysMem The physical address.
4450 * @param fAccess The intended access.
4451 * @param pvMem What iemMemPageMap returned.
4452 */
4453DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem)
4454{
4455 NOREF(pIemCpu);
4456 NOREF(GCPhysMem);
4457 NOREF(fAccess);
4458 NOREF(pvMem);
4459}
4460
4461
4462/**
4463 * Looks up a memory mapping entry.
4464 *
4465 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
4466 * @param pIemCpu The IEM per CPU data.
4467 * @param pvMem The memory address.
4468 * @param fAccess The access type to match (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
4469 */
4470DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4471{
4472 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
4473 if ( pIemCpu->aMemMappings[0].pv == pvMem
4474 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4475 return 0;
4476 if ( pIemCpu->aMemMappings[1].pv == pvMem
4477 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4478 return 1;
4479 if ( pIemCpu->aMemMappings[2].pv == pvMem
4480 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4481 return 2;
4482 return VERR_NOT_FOUND;
4483}
4484
4485
4486/**
4487 * Finds a free memmap entry when using iNextMapping doesn't work.
4488 *
4489 * @returns Memory mapping index, 1024 on failure.
4490 * @param pIemCpu The IEM per CPU data.
4491 */
4492static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
4493{
4494 /*
4495 * The easy case.
4496 */
4497 if (pIemCpu->cActiveMappings == 0)
4498 {
4499 pIemCpu->iNextMapping = 1;
4500 return 0;
4501 }
4502
4503 /* There should be enough mappings for all instructions. */
4504 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
4505
4506 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
4507 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
4508 return i;
4509
4510 AssertFailedReturn(1024);
4511}
4512
4513
4514/**
4515 * Commits a bounce buffer that needs writing back and unmaps it.
4516 *
4517 * @returns Strict VBox status code.
4518 * @param pIemCpu The IEM per CPU data.
4519 * @param iMemMap The index of the buffer to commit.
4520 */
4521static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
4522{
4523 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
4524 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
4525
4526 /*
4527 * Do the writing.
4528 */
4529 int rc;
4530 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
4531 && !IEM_VERIFICATION_ENABLED(pIemCpu))
4532 {
4533 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4534 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4535 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4536 if (!pIemCpu->fByPassHandlers)
4537 {
4538 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4539 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4540 pbBuf,
4541 cbFirst);
4542 if (cbSecond && rc == VINF_SUCCESS)
4543 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4544 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4545 pbBuf + cbFirst,
4546 cbSecond);
4547 }
4548 else
4549 {
4550 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4551 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4552 pbBuf,
4553 cbFirst);
4554 if (cbSecond && rc == VINF_SUCCESS)
4555 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4556 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4557 pbBuf + cbFirst,
4558 cbSecond);
4559 }
4560 }
4561 else
4562 rc = VINF_SUCCESS;
4563
4564#ifdef IEM_VERIFICATION_MODE
4565 /*
4566 * Record the write(s).
4567 */
4568 if (!pIemCpu->fNoRem)
4569 {
4570 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4571 if (pEvtRec)
4572 {
4573 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4574 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
4575 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4576 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
4577 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
4578 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4579 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4580 }
4581 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
4582 {
4583 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4584 if (pEvtRec)
4585 {
4586 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4587 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
4588 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4589 memcpy(pEvtRec->u.RamWrite.ab,
4590 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
4591 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
4592 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4593 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4594 }
4595 }
4596 }
4597#endif
4598
4599 /*
4600 * Free the mapping entry.
4601 */
4602 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4603 Assert(pIemCpu->cActiveMappings != 0);
4604 pIemCpu->cActiveMappings--;
4605 return rc;
4606}
4607
4608
4609/**
4610 * iemMemMap worker that deals with a request crossing pages.
4611 */
4612static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
4613 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
4614{
4615 /*
4616 * Do the address translations.
4617 */
4618 RTGCPHYS GCPhysFirst;
4619 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
4620 if (rcStrict != VINF_SUCCESS)
4621 return rcStrict;
4622
4623 RTGCPHYS GCPhysSecond;
4624 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
4625 if (rcStrict != VINF_SUCCESS)
4626 return rcStrict;
4627 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
4628
4629 /*
4630 * Read in the current memory content if it's a read, execute or partial
4631 * write access.
4632 */
4633 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4634 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
4635 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
4636
4637 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
4638 {
4639 int rc;
4640 if (!pIemCpu->fByPassHandlers)
4641 {
4642 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
4643 if (rc != VINF_SUCCESS)
4644 return rc;
4645 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
4646 if (rc != VINF_SUCCESS)
4647 return rc;
4648 }
4649 else
4650 {
4651 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
4652 if (rc != VINF_SUCCESS)
4653 return rc;
4654 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
4655 if (rc != VINF_SUCCESS)
4656 return rc;
4657 }
4658
4659#ifdef IEM_VERIFICATION_MODE
4660 if ( !pIemCpu->fNoRem
4661 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
4662 {
4663 /*
4664 * Record the reads.
4665 */
4666 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4667 if (pEvtRec)
4668 {
4669 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4670 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
4671 pEvtRec->u.RamRead.cb = cbFirstPage;
4672 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4673 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4674 }
4675 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4676 if (pEvtRec)
4677 {
4678 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4679 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
4680 pEvtRec->u.RamRead.cb = cbSecondPage;
4681 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4682 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4683 }
4684 }
4685#endif
4686 }
4687#ifdef VBOX_STRICT
4688 else
4689 memset(pbBuf, 0xcc, cbMem);
4690#endif
4691#ifdef VBOX_STRICT
4692 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
4693 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
4694#endif
4695
4696 /*
4697 * Commit the bounce buffer entry.
4698 */
4699 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
4700 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
4701 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
4702 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
4703 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
4704 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
4705 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
4706 pIemCpu->cActiveMappings++;
4707
4708 *ppvMem = pbBuf;
4709 return VINF_SUCCESS;
4710}
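/* Example of the split above: a 4-byte access whose first byte sits at page offset
 * 0xffe gets cbFirstPage = PAGE_SIZE - 0xffe = 2 and cbSecondPage = 2, so two bytes
 * are bounced to the end of the first physical page and two to the start of the
 * second. */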
4711
4712
4713/**
4714 * iemMemMap worker that deals with iemMemPageMap failures.
4715 */
4716static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
4717 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
4718{
4719 /*
4720 * Filter out conditions we can handle and the ones which shouldn't happen.
4721 */
4722 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE
4723 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
4724 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
4725 {
4726 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
4727 return rcMap;
4728 }
4729 pIemCpu->cPotentialExits++;
4730
4731 /*
4732 * Read in the current memory content if it's a read, execute or partial
4733 * write access.
4734 */
4735 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4736 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
4737 {
4738 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
4739 memset(pbBuf, 0xff, cbMem);
4740 else
4741 {
4742 int rc;
4743 if (!pIemCpu->fByPassHandlers)
4744 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
4745 else
4746 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
4747 if (rc != VINF_SUCCESS)
4748 return rc;
4749 }
4750
4751#ifdef IEM_VERIFICATION_MODE
4752 if ( !pIemCpu->fNoRem
4753 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
4754 {
4755 /*
4756 * Record the read.
4757 */
4758 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4759 if (pEvtRec)
4760 {
4761 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4762 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
4763 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
4764 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4765 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4766 }
4767 }
4768#endif
4769 }
4770#ifdef VBOX_STRICT
4771 else
4772 memset(pbBuf, 0xcc, cbMem);
4773#endif
4774#ifdef VBOX_STRICT
4775 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
4776 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
4777#endif
4778
4779 /*
4780 * Commit the bounce buffer entry.
4781 */
4782 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
4783 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
4784 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
4785 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
4786 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
4787 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
4788 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
4789 pIemCpu->cActiveMappings++;
4790
4791 *ppvMem = pbBuf;
4792 return VINF_SUCCESS;
4793}
4794
4795
4796
4797/**
4798 * Maps the specified guest memory for the given kind of access.
4799 *
4800 * This may be using bounce buffering of the memory if it's crossing a page
4801 * boundary or if there is an access handler installed for any of it. Because
4802 * of lock prefix guarantees, we're in for some extra clutter when this
4803 * happens.
4804 *
4805 * This may raise a \#GP, \#SS, \#PF or \#AC.
4806 *
4807 * @returns VBox strict status code.
4808 *
4809 * @param pIemCpu The IEM per CPU data.
4810 * @param ppvMem Where to return the pointer to the mapped
4811 * memory.
4812 * @param cbMem The number of bytes to map. This is usually 1,
4813 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
4814 * string operations it can be up to a page.
4815 * @param iSegReg The index of the segment register to use for
4816 * this access. The base and limits are checked.
4817 * Use UINT8_MAX to indicate that no segmentation
4818 * is required (for IDT, GDT and LDT accesses).
4819 * @param GCPtrMem The address of the guest memory.
4820 * @param fAccess How the memory is being accessed. The
4821 * IEM_ACCESS_TYPE_XXX bit is used to figure out
4822 * how to map the memory, while the
4823 * IEM_ACCESS_WHAT_XXX bit is used when raising
4824 * exceptions.
4825 */
4826static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
4827{
4828 /*
4829 * Check the input and figure out which mapping entry to use.
4830 */
4831 Assert(cbMem <= 32 || cbMem == 512);
4832 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
4833
4834 unsigned iMemMap = pIemCpu->iNextMapping;
4835 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
4836 {
4837 iMemMap = iemMemMapFindFree(pIemCpu);
4838 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
4839 }
4840
4841 /*
4842 * Map the memory, checking that we can actually access it. If something
4843 * slightly complicated happens, fall back on bounce buffering.
4844 */
4845 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
4846 if (rcStrict != VINF_SUCCESS)
4847 return rcStrict;
4848
4849 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
4850 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
4851
4852 RTGCPHYS GCPhysFirst;
4853 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
4854 if (rcStrict != VINF_SUCCESS)
4855 return rcStrict;
4856
4857 void *pvMem;
4858 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
4859 if (rcStrict != VINF_SUCCESS)
4860 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
4861
4862 /*
4863 * Fill in the mapping table entry.
4864 */
4865 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
4866 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
4867 pIemCpu->iNextMapping = iMemMap + 1;
4868 pIemCpu->cActiveMappings++;
4869
4870 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
4871 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
4872 pIemCpu->cbWritten += (uint32_t)cbMem;
4873 *ppvMem = pvMem;
4874 return VINF_SUCCESS;
4875}
4876
4877
4878/**
4879 * Commits the guest memory if bounce buffered and unmaps it.
4880 *
4881 * @returns Strict VBox status code.
4882 * @param pIemCpu The IEM per CPU data.
4883 * @param pvMem The mapping.
4884 * @param fAccess The kind of access.
4885 */
4886static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4887{
4888 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
4889 AssertReturn(iMemMap >= 0, iMemMap);
4890
4891 /*
4892 * If it's bounce buffered, we need to write back the buffer.
4893 */
4894 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
4895 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
4896 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
4897
4898 /* Free the entry. */
4899 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4900 Assert(pIemCpu->cActiveMappings != 0);
4901 pIemCpu->cActiveMappings--;
4902 return VINF_SUCCESS;
4903}
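/* Rough usage sketch of the map/commit pairing (illustrative only; fBits is a
 * placeholder and IEM_ACCESS_DATA_RW is assumed to be the usual read+write data
 * access constant):
 *     uint32_t *pu32;
 *     rcStrict = iemMemMap(pIemCpu, (void **)&pu32, sizeof(*pu32), iSegReg,
 *                          GCPtrMem, IEM_ACCESS_DATA_RW);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32 |= fBits;
 *         rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32, IEM_ACCESS_DATA_RW);
 *     }
 * The commit step is what writes back a bounce buffered mapping. */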
4904
4905
4906/**
4907 * Fetches a data byte.
4908 *
4909 * @returns Strict VBox status code.
4910 * @param pIemCpu The IEM per CPU data.
4911 * @param pu8Dst Where to return the byte.
4912 * @param iSegReg The index of the segment register to use for
4913 * this access. The base and limits are checked.
4914 * @param GCPtrMem The address of the guest memory.
4915 */
4916static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4917{
4918 /* The lazy approach for now... */
4919 uint8_t const *pu8Src;
4920 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4921 if (rc == VINF_SUCCESS)
4922 {
4923 *pu8Dst = *pu8Src;
4924 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
4925 }
4926 return rc;
4927}
4928
4929
4930/**
4931 * Fetches a data word.
4932 *
4933 * @returns Strict VBox status code.
4934 * @param pIemCpu The IEM per CPU data.
4935 * @param pu16Dst Where to return the word.
4936 * @param iSegReg The index of the segment register to use for
4937 * this access. The base and limits are checked.
4938 * @param GCPtrMem The address of the guest memory.
4939 */
4940static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4941{
4942 /* The lazy approach for now... */
4943 uint16_t const *pu16Src;
4944 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4945 if (rc == VINF_SUCCESS)
4946 {
4947 *pu16Dst = *pu16Src;
4948 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
4949 }
4950 return rc;
4951}
4952
4953
4954/**
4955 * Fetches a data dword.
4956 *
4957 * @returns Strict VBox status code.
4958 * @param pIemCpu The IEM per CPU data.
4959 * @param pu32Dst Where to return the dword.
4960 * @param iSegReg The index of the segment register to use for
4961 * this access. The base and limits are checked.
4962 * @param GCPtrMem The address of the guest memory.
4963 */
4964static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4965{
4966 /* The lazy approach for now... */
4967 uint32_t const *pu32Src;
4968 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4969 if (rc == VINF_SUCCESS)
4970 {
4971 *pu32Dst = *pu32Src;
4972 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
4973 }
4974 return rc;
4975}
4976
4977
4978#ifdef SOME_UNUSED_FUNCTION
4979/**
4980 * Fetches a data dword and sign extends it to a qword.
4981 *
4982 * @returns Strict VBox status code.
4983 * @param pIemCpu The IEM per CPU data.
4984 * @param pu64Dst Where to return the sign extended value.
4985 * @param iSegReg The index of the segment register to use for
4986 * this access. The base and limits are checked.
4987 * @param GCPtrMem The address of the guest memory.
4988 */
4989static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4990{
4991 /* The lazy approach for now... */
4992 int32_t const *pi32Src;
4993 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4994 if (rc == VINF_SUCCESS)
4995 {
4996 *pu64Dst = *pi32Src;
4997 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
4998 }
4999#ifdef __GNUC__ /* warning: GCC may be a royal pain */
5000 else
5001 *pu64Dst = 0;
5002#endif
5003 return rc;
5004}
5005#endif
5006
5007
5008/**
5009 * Fetches a data qword.
5010 *
5011 * @returns Strict VBox status code.
5012 * @param pIemCpu The IEM per CPU data.
5013 * @param pu64Dst Where to return the qword.
5014 * @param iSegReg The index of the segment register to use for
5015 * this access. The base and limits are checked.
5016 * @param GCPtrMem The address of the guest memory.
5017 */
5018static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5019{
5020 /* The lazy approach for now... */
5021 uint64_t const *pu64Src;
5022 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5023 if (rc == VINF_SUCCESS)
5024 {
5025 *pu64Dst = *pu64Src;
5026 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5027 }
5028 return rc;
5029}
5030
5031
5032/**
5033 * Fetches a data tword.
5034 *
5035 * @returns Strict VBox status code.
5036 * @param pIemCpu The IEM per CPU data.
5037 * @param pr80Dst Where to return the tword.
5038 * @param iSegReg The index of the segment register to use for
5039 * this access. The base and limits are checked.
5040 * @param GCPtrMem The address of the guest memory.
5041 */
5042static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5043{
5044 /* The lazy approach for now... */
5045 PCRTFLOAT80U pr80Src;
5046 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5047 if (rc == VINF_SUCCESS)
5048 {
5049 *pr80Dst = *pr80Src;
5050 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
5051 }
5052 return rc;
5053}
5054
5055
5056/**
5057 * Fetches a descriptor register (lgdt, lidt).
5058 *
5059 * @returns Strict VBox status code.
5060 * @param pIemCpu The IEM per CPU data.
5061 * @param pcbLimit Where to return the limit.
5062 * @param pGCPtrBase Where to return the base.
5063 * @param iSegReg The index of the segment register to use for
5064 * this access. The base and limits are checked.
5065 * @param GCPtrMem The address of the guest memory.
5066 * @param enmOpSize The effective operand size.
5067 */
5068static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
5069 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5070{
5071 uint8_t const *pu8Src;
5072 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5073 (void **)&pu8Src,
5074 enmOpSize == IEMMODE_64BIT
5075 ? 2 + 8
5076 : enmOpSize == IEMMODE_32BIT
5077 ? 2 + 4
5078 : 2 + 3,
5079 iSegReg,
5080 GCPtrMem,
5081 IEM_ACCESS_DATA_R);
5082 if (rcStrict == VINF_SUCCESS)
5083 {
5084 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
5085 switch (enmOpSize)
5086 {
5087 case IEMMODE_16BIT:
5088 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
5089 break;
5090 case IEMMODE_32BIT:
5091 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
5092 break;
5093 case IEMMODE_64BIT:
5094 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
5095 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
5096 break;
5097
5098 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5099 }
5100 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5101 }
5102 return rcStrict;
5103}
5104
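/*
 * Worked example for the decoder above, using illustrative bytes: with a
 * 32-bit operand size the six bytes 7f 03 00 50 23 80 give a limit of
 * 0x037f (RT_MAKE_U16(0x7f, 0x03)) and a base of 0x80235000
 * (RT_MAKE_U32_FROM_U8(0x00, 0x50, 0x23, 0x80)).
 */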
5105
5106
5107/**
5108 * Stores a data byte.
5109 *
5110 * @returns Strict VBox status code.
5111 * @param pIemCpu The IEM per CPU data.
5112 * @param iSegReg The index of the segment register to use for
5113 * this access. The base and limits are checked.
5114 * @param GCPtrMem The address of the guest memory.
5115 * @param u8Value The value to store.
5116 */
5117static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
5118{
5119 /* The lazy approach for now... */
5120 uint8_t *pu8Dst;
5121 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5122 if (rc == VINF_SUCCESS)
5123 {
5124 *pu8Dst = u8Value;
5125 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
5126 }
5127 return rc;
5128}
5129
5130
5131/**
5132 * Stores a data word.
5133 *
5134 * @returns Strict VBox status code.
5135 * @param pIemCpu The IEM per CPU data.
5136 * @param iSegReg The index of the segment register to use for
5137 * this access. The base and limits are checked.
5138 * @param GCPtrMem The address of the guest memory.
5139 * @param u16Value The value to store.
5140 */
5141static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
5142{
5143 /* The lazy approach for now... */
5144 uint16_t *pu16Dst;
5145 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5146 if (rc == VINF_SUCCESS)
5147 {
5148 *pu16Dst = u16Value;
5149 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
5150 }
5151 return rc;
5152}
5153
5154
5155/**
5156 * Stores a data dword.
5157 *
5158 * @returns Strict VBox status code.
5159 * @param pIemCpu The IEM per CPU data.
5160 * @param iSegReg The index of the segment register to use for
5161 * this access. The base and limits are checked.
5162 * @param GCPtrMem The address of the guest memory.
5163 * @param u32Value The value to store.
5164 */
5165static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
5166{
5167 /* The lazy approach for now... */
5168 uint32_t *pu32Dst;
5169 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5170 if (rc == VINF_SUCCESS)
5171 {
5172 *pu32Dst = u32Value;
5173 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
5174 }
5175 return rc;
5176}
5177
5178
5179/**
5180 * Stores a data qword.
5181 *
5182 * @returns Strict VBox status code.
5183 * @param pIemCpu The IEM per CPU data.
5184 * @param iSegReg The index of the segment register to use for
5185 * this access. The base and limits are checked.
5186 * @param GCPtrMem The address of the guest memory.
5187 * @param u64Value The value to store.
5188 */
5189static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
5190{
5191 /* The lazy approach for now... */
5192 uint64_t *pu64Dst;
5193 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5194 if (rc == VINF_SUCCESS)
5195 {
5196 *pu64Dst = u64Value;
5197 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
5198 }
5199 return rc;
5200}
5201
5202
5203/**
5204 * Pushes a word onto the stack.
5205 *
5206 * @returns Strict VBox status code.
5207 * @param pIemCpu The IEM per CPU data.
5208 * @param u16Value The value to push.
5209 */
5210static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
5211{
5212 /* Decrement the stack pointer. */
5213 uint64_t uNewRsp;
5214 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5215 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
5216
5217 /* Write the word the lazy way. */
5218 uint16_t *pu16Dst;
5219 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5220 if (rc == VINF_SUCCESS)
5221 {
5222 *pu16Dst = u16Value;
5223 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5224 }
5225
5226 /* Commit the new RSP value unless an access handler made trouble. */
5227 if (rc == VINF_SUCCESS)
5228 pCtx->rsp = uNewRsp;
5229
5230 return rc;
5231}
5232
5233
5234/**
5235 * Pushes a dword onto the stack.
5236 *
5237 * @returns Strict VBox status code.
5238 * @param pIemCpu The IEM per CPU data.
5239 * @param u32Value The value to push.
5240 */
5241static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
5242{
5243 /* Decrement the stack pointer. */
5244 uint64_t uNewRsp;
5245 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5246 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
5247
5248 /* Write the dword the lazy way. */
5249 uint32_t *pu32Dst;
5250 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5251 if (rc == VINF_SUCCESS)
5252 {
5253 *pu32Dst = u32Value;
5254 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5255 }
5256
5257 /* Commit the new RSP value unless an access handler made trouble. */
5258 if (rc == VINF_SUCCESS)
5259 pCtx->rsp = uNewRsp;
5260
5261 return rc;
5262}
5263
5264
5265/**
5266 * Pushes a qword onto the stack.
5267 *
5268 * @returns Strict VBox status code.
5269 * @param pIemCpu The IEM per CPU data.
5270 * @param u64Value The value to push.
5271 */
5272static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
5273{
5274 /* Decrement the stack pointer. */
5275 uint64_t uNewRsp;
5276 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5277 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
5278
5279 /* Write the qword the lazy way. */
5280 uint64_t *pu64Dst;
5281 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5282 if (rc == VINF_SUCCESS)
5283 {
5284 *pu64Dst = u64Value;
5285 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5286 }
5287
5288 /* Commit the new RSP value unless an access handler made trouble. */
5289 if (rc == VINF_SUCCESS)
5290 pCtx->rsp = uNewRsp;
5291
5292 return rc;
5293}
5294
5295
5296/**
5297 * Pops a word from the stack.
5298 *
5299 * @returns Strict VBox status code.
5300 * @param pIemCpu The IEM per CPU data.
5301 * @param pu16Value Where to store the popped value.
5302 */
5303static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
5304{
5305 /* Increment the stack pointer. */
5306 uint64_t uNewRsp;
5307 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5308 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
5309
5310 /* Read the word the lazy way. */
5311 uint16_t const *pu16Src;
5312 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5313 if (rc == VINF_SUCCESS)
5314 {
5315 *pu16Value = *pu16Src;
5316 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5317
5318 /* Commit the new RSP value. */
5319 if (rc == VINF_SUCCESS)
5320 pCtx->rsp = uNewRsp;
5321 }
5322
5323 return rc;
5324}
5325
5326
5327/**
5328 * Pops a dword from the stack.
5329 *
5330 * @returns Strict VBox status code.
5331 * @param pIemCpu The IEM per CPU data.
5332 * @param pu32Value Where to store the popped value.
5333 */
5334static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
5335{
5336 /* Increment the stack pointer. */
5337 uint64_t uNewRsp;
5338 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5339 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
5340
5341 /* Read the dword the lazy way. */
5342 uint32_t const *pu32Src;
5343 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5344 if (rc == VINF_SUCCESS)
5345 {
5346 *pu32Value = *pu32Src;
5347 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5348
5349 /* Commit the new RSP value. */
5350 if (rc == VINF_SUCCESS)
5351 pCtx->rsp = uNewRsp;
5352 }
5353
5354 return rc;
5355}
5356
5357
5358/**
5359 * Pops a qword from the stack.
5360 *
5361 * @returns Strict VBox status code.
5362 * @param pIemCpu The IEM per CPU data.
5363 * @param pu64Value Where to store the popped value.
5364 */
5365static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
5366{
5367 /* Increment the stack pointer. */
5368 uint64_t uNewRsp;
5369 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5370 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
5371
5372 /* Read the qword the lazy way. */
5373 uint64_t const *pu64Src;
5374 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5375 if (rc == VINF_SUCCESS)
5376 {
5377 *pu64Value = *pu64Src;
5378 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5379
5380 /* Commit the new RSP value. */
5381 if (rc == VINF_SUCCESS)
5382 pCtx->rsp = uNewRsp;
5383 }
5384
5385 return rc;
5386}
5387
5388
5389/**
5390 * Pushes a word onto the stack, using a temporary stack pointer.
5391 *
5392 * @returns Strict VBox status code.
5393 * @param pIemCpu The IEM per CPU data.
5394 * @param u16Value The value to push.
5395 * @param pTmpRsp Pointer to the temporary stack pointer.
5396 */
5397static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
5398{
5399 /* Decrement the stack pointer. */
5400 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5401 RTUINT64U NewRsp = *pTmpRsp;
5402 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
5403
5404 /* Write the word the lazy way. */
5405 uint16_t *pu16Dst;
5406 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5407 if (rc == VINF_SUCCESS)
5408 {
5409 *pu16Dst = u16Value;
5410 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5411 }
5412
5413 /* Commit the new RSP value unless an access handler made trouble. */
5414 if (rc == VINF_SUCCESS)
5415 *pTmpRsp = NewRsp;
5416
5417 return rc;
5418}
5419
5420
5421/**
5422 * Pushes a dword onto the stack, using a temporary stack pointer.
5423 *
5424 * @returns Strict VBox status code.
5425 * @param pIemCpu The IEM per CPU data.
5426 * @param u32Value The value to push.
5427 * @param pTmpRsp Pointer to the temporary stack pointer.
5428 */
5429static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
5430{
5431 /* Decrement the stack pointer. */
5432 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5433 RTUINT64U NewRsp = *pTmpRsp;
5434 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
5435
5436 /* Write the dword the lazy way. */
5437 uint32_t *pu32Dst;
5438 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5439 if (rc == VINF_SUCCESS)
5440 {
5441 *pu32Dst = u32Value;
5442 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5443 }
5444
5445 /* Commit the new RSP value unless an access handler made trouble. */
5446 if (rc == VINF_SUCCESS)
5447 *pTmpRsp = NewRsp;
5448
5449 return rc;
5450}
5451
5452
5453#ifdef SOME_UNUSED_FUNCTION
5454/**
5455 * Pushes a qword onto the stack, using a temporary stack pointer.
5456 *
5457 * @returns Strict VBox status code.
5458 * @param pIemCpu The IEM per CPU data.
5459 * @param u64Value The value to push.
5460 * @param pTmpRsp Pointer to the temporary stack pointer.
5461 */
5462static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
5463{
5464 /* Decrement the stack pointer. */
5465 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5466 RTUINT64U NewRsp = *pTmpRsp;
5467 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
5468
5469 /* Write the qword the lazy way. */
5470 uint64_t *pu64Dst;
5471 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5472 if (rc == VINF_SUCCESS)
5473 {
5474 *pu64Dst = u64Value;
5475 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5476 }
5477
5478 /* Commit the new RSP value unless an access handler made trouble. */
5479 if (rc == VINF_SUCCESS)
5480 *pTmpRsp = NewRsp;
5481
5482 return rc;
5483}
5484#endif
5485
5486
5487/**
5488 * Pops a word from the stack, using a temporary stack pointer.
5489 *
5490 * @returns Strict VBox status code.
5491 * @param pIemCpu The IEM per CPU data.
5492 * @param pu16Value Where to store the popped value.
5493 * @param pTmpRsp Pointer to the temporary stack pointer.
5494 */
5495static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
5496{
5497 /* Increment the stack pointer. */
5498 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5499 RTUINT64U NewRsp = *pTmpRsp;
5500 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
5501
5503 /* Read the word the lazy way. */
5503 uint16_t const *pu16Src;
5504 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5505 if (rc == VINF_SUCCESS)
5506 {
5507 *pu16Value = *pu16Src;
5508 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5509
5510 /* Commit the new RSP value. */
5511 if (rc == VINF_SUCCESS)
5512 *pTmpRsp = NewRsp;
5513 }
5514
5515 return rc;
5516}
5517
5518
5519/**
5520 * Pops a dword from the stack, using a temporary stack pointer.
5521 *
5522 * @returns Strict VBox status code.
5523 * @param pIemCpu The IEM per CPU data.
5524 * @param pu32Value Where to store the popped value.
5525 * @param pTmpRsp Pointer to the temporary stack pointer.
5526 */
5527static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
5528{
5529 /* Increment the stack pointer. */
5530 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5531 RTUINT64U NewRsp = *pTmpRsp;
5532 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
5533
5535 /* Read the dword the lazy way. */
5535 uint32_t const *pu32Src;
5536 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5537 if (rc == VINF_SUCCESS)
5538 {
5539 *pu32Value = *pu32Src;
5540 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5541
5542 /* Commit the new RSP value. */
5543 if (rc == VINF_SUCCESS)
5544 *pTmpRsp = NewRsp;
5545 }
5546
5547 return rc;
5548}
5549
5550
5551/**
5552 * Pops a qword from the stack, using a temporary stack pointer.
5553 *
5554 * @returns Strict VBox status code.
5555 * @param pIemCpu The IEM per CPU data.
5556 * @param pu64Value Where to store the popped value.
5557 * @param pTmpRsp Pointer to the temporary stack pointer.
5558 */
5559static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
5560{
5561 /* Increment the stack pointer. */
5562 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5563 RTUINT64U NewRsp = *pTmpRsp;
5564 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
5565
5567 /* Read the qword the lazy way. */
5567 uint64_t const *pu64Src;
5568 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5569 if (rcStrict == VINF_SUCCESS)
5570 {
5571 *pu64Value = *pu64Src;
5572 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5573
5574 /* Commit the new RSP value. */
5575 if (rcStrict == VINF_SUCCESS)
5576 *pTmpRsp = NewRsp;
5577 }
5578
5579 return rcStrict;
5580}
5581
5582
5583/**
5584 * Begin a special stack push (used by interrupts, exceptions and such).
5585 *
5586 * This will raise \#SS or \#PF if appropriate.
5587 *
5588 * @returns Strict VBox status code.
5589 * @param pIemCpu The IEM per CPU data.
5590 * @param cbMem The number of bytes to push onto the stack.
5591 * @param ppvMem Where to return the pointer to the stack memory.
5592 * As with the other memory functions this could be
5593 * direct access or bounce buffered access, so
5594 * don't commit any register state until the commit call
5595 * succeeds.
5596 * @param puNewRsp Where to return the new RSP value. This must be
5597 * passed unchanged to
5598 * iemMemStackPushCommitSpecial().
5599 */
5600static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
5601{
5602 Assert(cbMem < UINT8_MAX);
5603 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5604 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
5605 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5606}
5607
5608
5609/**
5610 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
5611 *
5612 * This will update the rSP.
5613 *
5614 * @returns Strict VBox status code.
5615 * @param pIemCpu The IEM per CPU data.
5616 * @param pvMem The pointer returned by
5617 * iemMemStackPushBeginSpecial().
5618 * @param uNewRsp The new RSP value returned by
5619 * iemMemStackPushBeginSpecial().
5620 */
5621static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
5622{
5623 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
5624 if (rcStrict == VINF_SUCCESS)
5625 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
5626 return rcStrict;
5627}
5628
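/*
 * A minimal usage sketch of the begin/commit pairing above; the caller, the
 * frame layout and the uErrCode variable are assumptions for illustration
 * (the real users are the exception/interrupt raising paths in this file):
 *
 *     uint16_t    *pu16Frame;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, sizeof(*pu16Frame),
 *                                                         (void **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     *pu16Frame = uErrCode;
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 */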
5629
5630/**
5631 * Begin a special stack pop (used by iret, retf and such).
5632 *
5633 * This will raise \#SS or \#PF if appropriate.
5634 *
5635 * @returns Strict VBox status code.
5636 * @param pIemCpu The IEM per CPU data.
5637 * @param cbMem The number of bytes to pop off the stack.
5638 * @param ppvMem Where to return the pointer to the stack memory.
5639 * @param puNewRsp Where to return the new RSP value. This must be
5640 * passed unchanged to
5641 * iemMemStackPopCommitSpecial() or applied
5642 * manually if iemMemStackPopDoneSpecial() is used.
5643 */
5644static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
5645{
5646 Assert(cbMem < UINT8_MAX);
5647 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5648 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
5649 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5650}
5651
5652
5653/**
5654 * Continue a special stack pop (used by iret and retf).
5655 *
5656 * This will raise \#SS or \#PF if appropriate.
5657 *
5658 * @returns Strict VBox status code.
5659 * @param pIemCpu The IEM per CPU data.
5660 * @param cbMem The number of bytes to pop off the stack.
5661 * @param ppvMem Where to return the pointer to the stack memory.
5662 * @param puNewRsp Where to return the new RSP value. This must be
5663 * passed unchanged to
5664 * iemMemStackPopCommitSpecial() or applied
5665 * manually if iemMemStackPopDoneSpecial() is used.
5666 */
5667static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
5668{
5669 Assert(cbMem < UINT8_MAX);
5670 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5671 RTUINT64U NewRsp;
5672 NewRsp.u = *puNewRsp;
5673 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
5674 *puNewRsp = NewRsp.u;
5675 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5676}
5677
5678
5679/**
5680 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
5681 *
5682 * This will update the rSP.
5683 *
5684 * @returns Strict VBox status code.
5685 * @param pIemCpu The IEM per CPU data.
5686 * @param pvMem The pointer returned by
5687 * iemMemStackPopBeginSpecial().
5688 * @param uNewRsp The new RSP value returned by
5689 * iemMemStackPopBeginSpecial().
5690 */
5691static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
5692{
5693 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
5694 if (rcStrict == VINF_SUCCESS)
5695 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
5696 return rcStrict;
5697}
5698
5699
5700/**
5701 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
5702 * iemMemStackPopContinueSpecial).
5703 *
5704 * The caller will manually commit the rSP.
5705 *
5706 * @returns Strict VBox status code.
5707 * @param pIemCpu The IEM per CPU data.
5708 * @param pvMem The pointer returned by
5709 * iemMemStackPopBeginSpecial() or
5710 * iemMemStackPopContinueSpecial().
5711 */
5712static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
5713{
5714 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
5715}
5716
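/*
 * A minimal usage sketch of the pop protocol above for a real-mode iret-like
 * frame; the variable names and the 6-byte frame are assumptions for
 * illustration:
 *
 *     uint16_t const *pu16Frame;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6,
 *                                                        (void const **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint16_t uNewIp    = pu16Frame[0];
 *     uint16_t uNewCs    = pu16Frame[1];
 *     uint16_t uNewFlags = pu16Frame[2];
 *     rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 *
 * iemMemStackPopDoneSpecial() is used instead of the commit when the final
 * RSP depends on the values just popped and has to be applied manually.
 */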
5717
5718/**
5719 * Fetches a system table dword.
5720 *
5721 * @returns Strict VBox status code.
5722 * @param pIemCpu The IEM per CPU data.
5723 * @param pu32Dst Where to return the dword.
5724 * @param iSegReg The index of the segment register to use for
5725 * this access. The base and limits are checked.
5726 * @param GCPtrMem The address of the guest memory.
5727 */
5728static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5729{
5730 /* The lazy approach for now... */
5731 uint32_t const *pu32Src;
5732 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
5733 if (rc == VINF_SUCCESS)
5734 {
5735 *pu32Dst = *pu32Src;
5736 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
5737 }
5738 return rc;
5739}
5740
5741
5742/**
5743 * Fetches a system table qword.
5744 *
5745 * @returns Strict VBox status code.
5746 * @param pIemCpu The IEM per CPU data.
5747 * @param pu64Dst Where to return the qword.
5748 * @param iSegReg The index of the segment register to use for
5749 * this access. The base and limits are checked.
5750 * @param GCPtrMem The address of the guest memory.
5751 */
5752static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5753{
5754 /* The lazy approach for now... */
5755 uint64_t const *pu64Src;
5756 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
5757 if (rc == VINF_SUCCESS)
5758 {
5759 *pu64Dst = *pu64Src;
5760 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
5761 }
5762 return rc;
5763}
5764
5765
5766/**
5767 * Fetches a descriptor table entry.
5768 *
5769 * @returns Strict VBox status code.
5770 * @param pIemCpu The IEM per CPU.
5771 * @param pDesc Where to return the descriptor table entry.
5772 * @param uSel The selector which table entry to fetch.
5773 */
5774static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
5775{
5776 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5777
5778 /** @todo did the 286 require all 8 bytes to be accessible? */
5779 /*
5780 * Get the selector table base and check bounds.
5781 */
5782 RTGCPTR GCPtrBase;
5783 if (uSel & X86_SEL_LDT)
5784 {
5785 if ( !pCtx->ldtr.Attr.n.u1Present
5786 || (uSel | 0x7U) > pCtx->ldtr.u32Limit )
5787 {
5788 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
5789 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
5790 /** @todo is this the right exception? */
5791 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5792 }
5793
5794 Assert(pCtx->ldtr.Attr.n.u1Present);
5795 GCPtrBase = pCtx->ldtr.u64Base;
5796 }
5797 else
5798 {
5799 if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
5800 {
5801 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
5802 /** @todo is this the right exception? */
5803 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5804 }
5805 GCPtrBase = pCtx->gdtr.pGdt;
5806 }
5807
5808 /*
5809 * Read the legacy descriptor and maybe the long mode extensions if
5810 * required.
5811 */
5812 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
5813 if (rcStrict == VINF_SUCCESS)
5814 {
5815 if ( !IEM_IS_LONG_MODE(pIemCpu)
5816 || pDesc->Legacy.Gen.u1DescType)
5817 pDesc->Long.au64[1] = 0;
5818 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
5819 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
5820 else
5821 {
5822 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
5823 /** @todo is this the right exception? */
5824 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5825 }
5826 }
5827 return rcStrict;
5828}
5829
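/*
 * Worked example of the selector decomposition above (illustrative value):
 * uSel = 0x002b has X86_SEL_LDT (bit 2) clear, so the GDT is used, the RPL
 * is 3, and the table byte offset is uSel & X86_SEL_MASK = 0x0028, i.e. the
 * sixth descriptor entry.
 */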
5830
5831/**
5832 * Fakes a long mode stack selector for SS = 0.
5833 *
5834 * @param pDescSs Where to return the fake stack descriptor.
5835 * @param uDpl The DPL we want.
5836 */
5837static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
5838{
5839 pDescSs->Long.au64[0] = 0;
5840 pDescSs->Long.au64[1] = 0;
5841 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
5842 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
5843 pDescSs->Long.Gen.u2Dpl = uDpl;
5844 pDescSs->Long.Gen.u1Present = 1;
5845 pDescSs->Long.Gen.u1Long = 1;
5846}
5847
5848
5849/**
5850 * Marks the selector descriptor as accessed (only non-system descriptors).
5851 *
5852 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
5853 * will therefore skip the limit checks.
5854 *
5855 * @returns Strict VBox status code.
5856 * @param pIemCpu The IEM per CPU.
5857 * @param uSel The selector.
5858 */
5859static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
5860{
5861 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5862
5863 /*
5864 * Get the selector table base and calculate the entry address.
5865 */
5866 RTGCPTR GCPtr = uSel & X86_SEL_LDT
5867 ? pCtx->ldtr.u64Base
5868 : pCtx->gdtr.pGdt;
5869 GCPtr += uSel & X86_SEL_MASK;
5870
5871 /*
5872 * ASMAtomicBitSet will assert if the address is misaligned, so do some
5873 * ugly stuff to avoid that. This also makes sure the access is atomic
5874 * and more or less removes any question about 8-bit vs 32-bit accesses.
5875 */
5876 VBOXSTRICTRC rcStrict;
5877 uint32_t volatile *pu32;
5878 if ((GCPtr & 3) == 0)
5879 {
5880 /* The normal case: map the aligned 32 bits containing the accessed bit (bit 40). */
5881 GCPtr += 2 + 2;
5882 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
5883 if (rcStrict != VINF_SUCCESS)
5884 return rcStrict;
5885 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
5886 }
5887 else
5888 {
5889 /* The misaligned GDT/LDT case, map the whole thing. */
5890 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
5891 if (rcStrict != VINF_SUCCESS)
5892 return rcStrict;
5893 switch ((uintptr_t)pu32 & 3)
5894 {
5895 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
5896 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
5897 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
5898 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
5899 }
5900 }
5901
5902 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
5903}
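/*
 * For reference, the bit arithmetic above: the accessed flag is bit 0 of the
 * type field in descriptor byte 5, i.e. bit 40 of the 8-byte descriptor.
 * After mapping the aligned dword at offset 4, that becomes bit 40 - 32 = 8,
 * which is what the aligned path sets; the misaligned path recomputes the
 * byte and bit offsets from whatever alignment the mapping returned.
 */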
5904
5905/** @} */
5906
5907
5908/*
5909 * Include the C/C++ implementation of instructions.
5910 */
5911#include "IEMAllCImpl.cpp.h"
5912
5913
5914
5915/** @name "Microcode" macros.
5916 *
5917 * The idea is that we should be able to use the same code to interpret
5918 * instructions as well as to recompile them. Thus this obfuscation.
5919 *
5920 * @{
5921 */
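/*
 * A minimal usage sketch of how these macros compose in an opcode handler;
 * the handler shape is modelled on the decoder functions in
 * IEMAllInstructions.cpp.h, but the concrete instruction and the iReg
 * variable are assumptions for illustration:
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(uint16_t, u16Tmp1);
 *     IEM_MC_LOCAL(uint16_t, u16Tmp2);
 *     IEM_MC_FETCH_GREG_U16(u16Tmp1, X86_GREG_xAX);
 *     IEM_MC_FETCH_GREG_U16(u16Tmp2, iReg);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp2);
 *     IEM_MC_STORE_GREG_U16(iReg, u16Tmp1);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *     return VINF_SUCCESS;
 */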
5922#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
5923#define IEM_MC_END() }
5924#define IEM_MC_PAUSE() do {} while (0)
5925#define IEM_MC_CONTINUE() do {} while (0)
5926
5927/** Internal macro. */
5928#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
5929 do \
5930 { \
5931 VBOXSTRICTRC rcStrict2 = a_Expr; \
5932 if (rcStrict2 != VINF_SUCCESS) \
5933 return rcStrict2; \
5934 } while (0)
5935
5936#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
5937#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
5938#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
5939#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
5940#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
5941#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
5942#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
5943
5944#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
5945#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
5946 do { \
5947 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
5948 return iemRaiseDeviceNotAvailable(pIemCpu); \
5949 } while (0)
5950#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
5951 do { \
5952 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
5953 return iemRaiseMathFault(pIemCpu); \
5954 } while (0)
5955#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
5956 do { \
5957 if (pIemCpu->uCpl != 0) \
5958 return iemRaiseGeneralProtectionFault0(pIemCpu); \
5959 } while (0)
5960
5961
5962#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
5963#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
5964#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
5965#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
5966#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
5967#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
5968#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
5969 uint32_t a_Name; \
5970 uint32_t *a_pName = &a_Name
5971#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
5972 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
5973
5974#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
5975#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
5976
5977#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5978#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5979#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5980#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5981#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5982#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5983#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5984#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5985#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5986#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5987#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
5988#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
5989#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5990#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5991#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
5992#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
5993#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
5994#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5995#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5996#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5997#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
5998#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
5999#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
6000#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6001#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6002#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
6003#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
6004
6005#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
6006#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
6007#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
6008#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
6009#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
6010#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
6011#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
6012#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
6013#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
6014#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
6015#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
6016 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
6017
6018#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
6019#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
6020/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
6021 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
6022#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
6023#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
6024#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6025
6026#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
6027#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
6028#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
6029 do { \
6030 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6031 *pu32Reg += (a_u32Value); \
6032 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
6033 } while (0)
6034#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
6035
6036#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
6037#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
6038#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
6039 do { \
6040 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6041 *pu32Reg -= (a_u32Value); \
6042 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
6043 } while (0)
6044#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
6045
6046#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
6047#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
6048#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
6049#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
6050#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
6051#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
6052#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
6053
6054#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
6055#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
6056#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6057#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
6058
6059#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
6060#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
6061#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
6062
6063#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
6064#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6065
6066#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
6067#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
6068#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
6069
6070#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
6071#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
6072#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
6073
6074#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6075
6076#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6077
6078#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
6079#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
6080#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
6081 do { \
6082 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6083 *pu32Reg &= (a_u32Value); \
6084 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
6085 } while (0)
6086#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
6087
6088#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
6089#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
6090#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
6091 do { \
6092 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6093 *pu32Reg |= (a_u32Value); \
6094 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
6095 } while (0)
6096#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
6097
6098
6099#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
6100#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
6101#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
6102
6103#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
6104
6105
6106#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
6107 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
6108#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
6109 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
6110#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
6111 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
6112
6113#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6114 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
6115#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6116 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6117#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
6118 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
6119
6120#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6121 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
6122#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6123 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6124#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
6125 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
6126
6127#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6128 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6129
6130#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6131 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6132#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6133 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6134
6135#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
6136 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
6137#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
6138 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
6139#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
6140 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
6141
6142
6143#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6144 do { \
6145 uint8_t u8Tmp; \
6146 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6147 (a_u16Dst) = u8Tmp; \
6148 } while (0)
6149#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6150 do { \
6151 uint8_t u8Tmp; \
6152 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6153 (a_u32Dst) = u8Tmp; \
6154 } while (0)
6155#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6156 do { \
6157 uint8_t u8Tmp; \
6158 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6159 (a_u64Dst) = u8Tmp; \
6160 } while (0)
6161#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6162 do { \
6163 uint16_t u16Tmp; \
6164 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6165 (a_u32Dst) = u16Tmp; \
6166 } while (0)
6167#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6168 do { \
6169 uint16_t u16Tmp; \
6170 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6171 (a_u64Dst) = u16Tmp; \
6172 } while (0)
6173#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6174 do { \
6175 uint32_t u32Tmp; \
6176 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6177 (a_u64Dst) = u32Tmp; \
6178 } while (0)
6179
6180#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6181 do { \
6182 uint8_t u8Tmp; \
6183 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6184 (a_u16Dst) = (int8_t)u8Tmp; \
6185 } while (0)
6186#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6187 do { \
6188 uint8_t u8Tmp; \
6189 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6190 (a_u32Dst) = (int8_t)u8Tmp; \
6191 } while (0)
6192#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6193 do { \
6194 uint8_t u8Tmp; \
6195 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6196 (a_u64Dst) = (int8_t)u8Tmp; \
6197 } while (0)
6198#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6199 do { \
6200 uint16_t u16Tmp; \
6201 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6202 (a_u32Dst) = (int16_t)u16Tmp; \
6203 } while (0)
6204#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6205 do { \
6206 uint16_t u16Tmp; \
6207 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6208 (a_u64Dst) = (int16_t)u16Tmp; \
6209 } while (0)
6210#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6211 do { \
6212 uint32_t u32Tmp; \
6213 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6214 (a_u64Dst) = (int32_t)u32Tmp; \
6215 } while (0)
6216
6217#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
6218 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
6219#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
6220 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
6221#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
6222 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
6223#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
6224 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
6225
6226#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
6227 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
6228#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
6229 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
6230#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
6231 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
6232#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
6233 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
6234
6235#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
6236#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
6237#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
6238#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
6239#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
6240#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
6241#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
6242 do { \
6243 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
6244 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
6245 } while (0)
6246
6247
6248#define IEM_MC_PUSH_U16(a_u16Value) \
6249 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
6250#define IEM_MC_PUSH_U32(a_u32Value) \
6251 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
6252#define IEM_MC_PUSH_U64(a_u64Value) \
6253 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
6254
6255#define IEM_MC_POP_U16(a_pu16Value) \
6256 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
6257#define IEM_MC_POP_U32(a_pu32Value) \
6258 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
6259#define IEM_MC_POP_U64(a_pu64Value) \
6260 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
6261
6262/** Maps guest memory for direct or bounce buffered access.
6263 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6264 * @remarks May return.
6265 */
6266#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
6267 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6268
6269/** Maps guest memory for direct or bounce buffered access.
6270 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6271 * @remarks May return.
6272 */
6273#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
6274 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6275
6276/** Commits the memory and unmaps the guest memory.
6277 * @remarks May return.
6278 */
6279#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
6280 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
6281
6282/** Commits the memory and unmaps the guest memory unless the FPU status word
6283 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
6284 * would cause FLD not to store.
6285 *
6286 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
6287 * store, while \#P will not.
6288 *
6289 * @remarks May in theory return - for now.
6290 */
6291#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
6292 do { \
6293 if ( !(a_u16FSW & X86_FSW_ES) \
6294 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
6295 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
6296 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
6297 } while (0)
6298
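/*
 * Worked example of the test above (illustrative register values): with
 * FSW = 0x0081 (ES + IE pending) and FCW = 0x037f (all exceptions masked) the
 * pending invalid-operand exception is masked, so the store is committed;
 * with FCW = 0x037e (IM clear) the same exception is unmasked and the
 * commit/unmap is skipped, leaving the FLD store undone.
 */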
6299/** Calculate the effective address from R/M. */
6300#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
6301 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
6302
6303#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
6304#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
6305#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
6306#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
6307#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
6308
6309/**
6310 * Defers the rest of the instruction emulation to a C implementation routine
6311 * and returns, only taking the standard parameters.
6312 *
6313 * @param a_pfnCImpl The pointer to the C routine.
6314 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6315 */
6316#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6317
6318/**
6319 * Defers the rest of instruction emulation to a C implementation routine and
6320 * returns, taking one argument in addition to the standard ones.
6321 *
6322 * @param a_pfnCImpl The pointer to the C routine.
6323 * @param a0 The argument.
6324 */
6325#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6326
6327/**
6328 * Defers the rest of the instruction emulation to a C implementation routine
6329 * and returns, taking two arguments in addition to the standard ones.
6330 *
6331 * @param a_pfnCImpl The pointer to the C routine.
6332 * @param a0 The first extra argument.
6333 * @param a1 The second extra argument.
6334 */
6335#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6336
6337/**
6338 * Defers the rest of the instruction emulation to a C implementation routine
6339 * and returns, taking three arguments in addition to the standard ones.
6340 *
6341 * @param a_pfnCImpl The pointer to the C routine.
6342 * @param a0 The first extra argument.
6343 * @param a1 The second extra argument.
6344 * @param a2 The third extra argument.
6345 */
6346#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
6347
6348/**
6349 * Defers the rest of the instruction emulation to a C implementation routine
6350 * and returns, taking five arguments in addition to the standard ones.
6351 *
6352 * @param a_pfnCImpl The pointer to the C routine.
6353 * @param a0 The first extra argument.
6354 * @param a1 The second extra argument.
6355 * @param a2 The third extra argument.
6356 * @param a3 The fourth extra argument.
6357 * @param a4 The fifth extra argument.
6358 */
6359#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
6360
6361/**
6362 * Defers the entire instruction emulation to a C implementation routine and
6363 * returns, only taking the standard parameters.
6364 *
6365 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6366 *
6367 * @param a_pfnCImpl The pointer to the C routine.
6368 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6369 */
6370#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6371
6372/**
6373 * Defers the entire instruction emulation to a C implementation routine and
6374 * returns, taking one argument in addition to the standard ones.
6375 *
6376 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6377 *
6378 * @param a_pfnCImpl The pointer to the C routine.
6379 * @param a0 The argument.
6380 */
6381#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6382
6383/**
6384 * Defers the entire instruction emulation to a C implementation routine and
6385 * returns, taking two arguments in addition to the standard ones.
6386 *
6387 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6388 *
6389 * @param a_pfnCImpl The pointer to the C routine.
6390 * @param a0 The first extra argument.
6391 * @param a1 The second extra argument.
6392 */
6393#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6394
6395/**
6396 * Defers the entire instruction emulation to a C implementation routine and
6397 * returns, taking three arguments in addition to the standard ones.
6398 *
6399 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6400 *
6401 * @param a_pfnCImpl The pointer to the C routine.
6402 * @param a0 The first extra argument.
6403 * @param a1 The second extra argument.
6404 * @param a2 The third extra argument.
6405 */
6406#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
6407
6408/**
6409 * Calls a FPU assembly implementation taking one visible argument.
6410 *
6411 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6412 * @param a0 The first extra argument.
6413 */
6414#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
6415 do { \
6416 iemFpuPrepareUsage(pIemCpu); \
6417 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
6418 } while (0)
6419
6420/**
6421 * Calls a FPU assembly implementation taking two visible arguments.
6422 *
6423 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6424 * @param a0 The first extra argument.
6425 * @param a1 The second extra argument.
6426 */
6427#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
6428 do { \
6429 iemFpuPrepareUsage(pIemCpu); \
6430 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
6431 } while (0)
6432
6433/**
6434 * Calls a FPU assembly implementation taking three visible arguments.
6435 *
6436 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6437 * @param a0 The first extra argument.
6438 * @param a1 The second extra argument.
6439 * @param a2 The third extra argument.
6440 */
6441#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
6442 do { \
6443 iemFpuPrepareUsage(pIemCpu); \
6444 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
6445 } while (0)
6446
6447#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
6448 do { \
6449 (a_FpuData).FSW = (a_FSW); \
6450 (a_FpuData).r80Result = *(a_pr80Value); \
6451 } while (0)
6452
6453/** Pushes FPU result onto the stack. */
6454#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
6455 iemFpuPushResult(pIemCpu, &a_FpuData)
6456/** Pushes FPU result onto the stack and sets the FPUDP. */
6457#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
6458 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
6459
6460/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
6461#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
6462 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
6463
6464/** Stores FPU result in a stack register. */
6465#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
6466 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
6467/** Stores FPU result in a stack register and pops the stack. */
6468#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
6469 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
6470/** Stores FPU result in a stack register and sets the FPUDP. */
6471#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6472 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6473/** Stores FPU result in a stack register, sets the FPUDP, and pops the
6474 * stack. */
6475#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6476 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6477
6478/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
6479#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
6480 iemFpuUpdateOpcodeAndIp(pIemCpu)
6481/** Free a stack register (for FFREE and FFREEP). */
6482#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
6483 iemFpuStackFree(pIemCpu, a_iStReg)
6484/** Increment the FPU stack pointer. */
6485#define IEM_MC_FPU_STACK_INC_TOP() \
6486 iemFpuStackIncTop(pIemCpu)
6487/** Decrement the FPU stack pointer. */
6488#define IEM_MC_FPU_STACK_DEC_TOP() \
6489 iemFpuStackDecTop(pIemCpu)
6490
6491/** Updates the FSW, FOP, FPUIP, and FPUCS. */
6492#define IEM_MC_UPDATE_FSW(a_u16FSW) \
6493 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6494/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
6495#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
6496 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6497/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
6498#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6499 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6500/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
6501#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
6502 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
6503/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
6504 * stack. */
6505#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6506 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6507/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
6508#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
6509 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
6510
6511/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
6512#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
6513 iemFpuStackUnderflow(pIemCpu, a_iStDst)
6514/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6515 * stack. */
6516#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
6517 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
6518/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6519 * FPUDS. */
6520#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6521 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6522/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6523 * FPUDS. Pops stack. */
6524#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6525 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6526/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6527 * stack twice. */
6528#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
6529 iemFpuStackUnderflowThenPopPop(pIemCpu)
6530/** Raises a FPU stack underflow exception for an instruction pushing a result
6531 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
6532#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
6533 iemFpuStackPushUnderflow(pIemCpu)
6534/** Raises a FPU stack underflow exception for an instruction pushing a result
6535 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
6536#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
6537 iemFpuStackPushUnderflowTwo(pIemCpu)
6538
6539/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6540 * FPUIP, FPUCS and FOP. */
6541#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
6542 iemFpuStackPushOverflow(pIemCpu)
6543/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6544 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
6545#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
6546 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
6547
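The FPU call, result-store, and underflow macros above are typically combined in one microcode block. A hedged sketch (not part of this file; the decoder name, IEM_MC_BEGIN/END, argument macros and the iemAImpl_fadd_r80_by_r80 worker are assumed from the IEM naming conventions) for an ST0,STn arithmetic form, using the two-register emptiness test defined further below:

FNIEMOP_DEF_1(iemOp_fadd_stN, uint8_t, bRm)                 /* hypothetical decoder */
{
    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);                 /* result goes into ST0 */
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);                      /* an operand was empty */
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}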
6548#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
6549#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
6550#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
6551#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
6552#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
6553 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6554 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6555#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
6556 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6557 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6558#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
6559 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
6560 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6561 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6562#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
6563 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
6564 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6565 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6566#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
6567#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
6568#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
6569#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
6570 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
6571 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6572#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
6573 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
6574 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6575#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
6576 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
6577 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6578#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
6579 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
6580 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6581#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
6582 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
6583 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6584#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
6585 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
6586 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6587#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
6588#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
6589#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
6590 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
6591#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
6592 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
6593#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
6594 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
6595#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
6596 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
6597#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
6598 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
6599#define IEM_MC_IF_FCW_IM() \
6600 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
6601
6602#define IEM_MC_ELSE() } else {
6603#define IEM_MC_ENDIF() } do {} while (0)
6604
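The IF/ELSE/ENDIF macros open and close C blocks, so a conditional instruction reads almost like structured pseudo-code. Another sketch (names assumed per the IEM conventions; not part of this file) for a short conditional jump:

FNIEMOP_DEF(iemOp_je_Jb)                                    /* hypothetical decoder */
{
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);           /* fetch the rel8 displacement */

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);                           /* taken: advance RIP by the sign-extended imm8 */
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();                               /* not taken: just step past the instruction */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}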
6605/** @} */
6606
6607
6608/** @name Opcode Debug Helpers.
6609 * @{
6610 */
6611#ifdef DEBUG
6612# define IEMOP_MNEMONIC(a_szMnemonic) \
6613 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
6614 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
6615# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
6616 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
6617 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
6618#else
6619# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
6620# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
6621#endif
6622
6623/** @} */
6624
6625
6626/** @name Opcode Helpers.
6627 * @{
6628 */
6629
6630/** The instruction allows no lock prefixing (in this encoding), throw #UD if
6631 * lock prefixed.
6632 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
6633#define IEMOP_HLP_NO_LOCK_PREFIX() \
6634 do \
6635 { \
6636 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
6637 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
6638 } while (0)
6639
6640/** The instruction is not available in 64-bit mode, throw #UD if we're in
6641 * 64-bit mode. */
6642#define IEMOP_HLP_NO_64BIT() \
6643 do \
6644 { \
6645 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
6646 return IEMOP_RAISE_INVALID_OPCODE(); \
6647 } while (0)
6648
6649/** The instruction defaults to 64-bit operand size if 64-bit mode. */
6650#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
6651 do \
6652 { \
6653 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
6654 iemRecalEffOpSize64Default(pIemCpu); \
6655 } while (0)
6656
6657/**
6658 * Done decoding.
6659 */
6660#define IEMOP_HLP_DONE_DECODING() \
6661 do \
6662 { \
6663 /*nothing for now, maybe later... */ \
6664 } while (0)
6665
6666/**
6667 * Done decoding, raise \#UD exception if lock prefix present.
6668 */
6669#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
6670 do \
6671 { \
6672 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
6673 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
6674 } while (0)
6675
6676
6677/**
6678 * Calculates the effective address of a ModR/M memory operand.
6679 *
6680 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
6681 *
6682 * @return Strict VBox status code.
6683 * @param pIemCpu The IEM per CPU data.
6684 * @param bRm The ModRM byte.
6685 * @param pGCPtrEff Where to return the effective address.
6686 */
6687static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
6688{
6689 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
6690 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6691#define SET_SS_DEF() \
6692 do \
6693 { \
6694 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
6695 pIemCpu->iEffSeg = X86_SREG_SS; \
6696 } while (0)
6697
6698/** @todo Check the effective address size crap! */
6699 switch (pIemCpu->enmEffAddrMode)
6700 {
6701 case IEMMODE_16BIT:
6702 {
6703 uint16_t u16EffAddr;
6704
6705 /* Handle the disp16 form with no registers first. */
6706 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
6707 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
6708 else
6709 {
6710 /* Get the displacement. */
6711 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
6712 {
6713 case 0: u16EffAddr = 0; break;
6714 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
6715 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
6716 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
6717 }
6718
6719 /* Add the base and index registers to the disp. */
6720 switch (bRm & X86_MODRM_RM_MASK)
6721 {
6722 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
6723 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
6724 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
6725 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
6726 case 4: u16EffAddr += pCtx->si; break;
6727 case 5: u16EffAddr += pCtx->di; break;
6728 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
6729 case 7: u16EffAddr += pCtx->bx; break;
6730 }
6731 }
6732
6733 *pGCPtrEff = u16EffAddr;
6734 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
6735 return VINF_SUCCESS;
6736 }
6737
6738 case IEMMODE_32BIT:
6739 {
6740 uint32_t u32EffAddr;
6741
6742 /* Handle the disp32 form with no registers first. */
6743 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
6744 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
6745 else
6746 {
6747 /* Get the register (or SIB) value. */
6748 switch ((bRm & X86_MODRM_RM_MASK))
6749 {
6750 case 0: u32EffAddr = pCtx->eax; break;
6751 case 1: u32EffAddr = pCtx->ecx; break;
6752 case 2: u32EffAddr = pCtx->edx; break;
6753 case 3: u32EffAddr = pCtx->ebx; break;
6754 case 4: /* SIB */
6755 {
6756 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
6757
6758 /* Get the index and scale it. */
6759 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
6760 {
6761 case 0: u32EffAddr = pCtx->eax; break;
6762 case 1: u32EffAddr = pCtx->ecx; break;
6763 case 2: u32EffAddr = pCtx->edx; break;
6764 case 3: u32EffAddr = pCtx->ebx; break;
6765 case 4: u32EffAddr = 0; /*none */ break;
6766 case 5: u32EffAddr = pCtx->ebp; break;
6767 case 6: u32EffAddr = pCtx->esi; break;
6768 case 7: u32EffAddr = pCtx->edi; break;
6769 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6770 }
6771 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
6772
6773 /* add base */
6774 switch (bSib & X86_SIB_BASE_MASK)
6775 {
6776 case 0: u32EffAddr += pCtx->eax; break;
6777 case 1: u32EffAddr += pCtx->ecx; break;
6778 case 2: u32EffAddr += pCtx->edx; break;
6779 case 3: u32EffAddr += pCtx->ebx; break;
6780 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
6781 case 5:
6782 if ((bRm & X86_MODRM_MOD_MASK) != 0)
6783 {
6784 u32EffAddr += pCtx->ebp;
6785 SET_SS_DEF();
6786 }
6787 else
6788 {
6789 uint32_t u32Disp;
6790 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6791 u32EffAddr += u32Disp;
6792 }
6793 break;
6794 case 6: u32EffAddr += pCtx->esi; break;
6795 case 7: u32EffAddr += pCtx->edi; break;
6796 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6797 }
6798 break;
6799 }
6800 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
6801 case 6: u32EffAddr = pCtx->esi; break;
6802 case 7: u32EffAddr = pCtx->edi; break;
6803 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6804 }
6805
6806 /* Get and add the displacement. */
6807 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
6808 {
6809 case 0:
6810 break;
6811 case 1:
6812 {
6813 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
6814 u32EffAddr += i8Disp;
6815 break;
6816 }
6817 case 2:
6818 {
6819 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6820 u32EffAddr += u32Disp;
6821 break;
6822 }
6823 default:
6824 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
6825 }
6826
6827 }
6828 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
6829 *pGCPtrEff = u32EffAddr;
6830 else
6831 {
6832 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
6833 *pGCPtrEff = u32EffAddr & UINT16_MAX;
6834 }
6835 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
6836 return VINF_SUCCESS;
6837 }
6838
6839 case IEMMODE_64BIT:
6840 {
6841 uint64_t u64EffAddr;
6842
6843 /* Handle the rip+disp32 form with no registers first. */
6844 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
6845 {
6846 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
6847 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
6848 }
6849 else
6850 {
6851 /* Get the register (or SIB) value. */
6852 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
6853 {
6854 case 0: u64EffAddr = pCtx->rax; break;
6855 case 1: u64EffAddr = pCtx->rcx; break;
6856 case 2: u64EffAddr = pCtx->rdx; break;
6857 case 3: u64EffAddr = pCtx->rbx; break;
6858 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
6859 case 6: u64EffAddr = pCtx->rsi; break;
6860 case 7: u64EffAddr = pCtx->rdi; break;
6861 case 8: u64EffAddr = pCtx->r8; break;
6862 case 9: u64EffAddr = pCtx->r9; break;
6863 case 10: u64EffAddr = pCtx->r10; break;
6864 case 11: u64EffAddr = pCtx->r11; break;
6865 case 13: u64EffAddr = pCtx->r13; break;
6866 case 14: u64EffAddr = pCtx->r14; break;
6867 case 15: u64EffAddr = pCtx->r15; break;
6868 /* SIB */
6869 case 4:
6870 case 12:
6871 {
6872 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
6873
6874 /* Get the index and scale it. */
6875 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
6876 {
6877 case 0: u64EffAddr = pCtx->rax; break;
6878 case 1: u64EffAddr = pCtx->rcx; break;
6879 case 2: u64EffAddr = pCtx->rdx; break;
6880 case 3: u64EffAddr = pCtx->rbx; break;
6881 case 4: u64EffAddr = 0; /*none */ break;
6882 case 5: u64EffAddr = pCtx->rbp; break;
6883 case 6: u64EffAddr = pCtx->rsi; break;
6884 case 7: u64EffAddr = pCtx->rdi; break;
6885 case 8: u64EffAddr = pCtx->r8; break;
6886 case 9: u64EffAddr = pCtx->r9; break;
6887 case 10: u64EffAddr = pCtx->r10; break;
6888 case 11: u64EffAddr = pCtx->r11; break;
6889 case 12: u64EffAddr = pCtx->r12; break;
6890 case 13: u64EffAddr = pCtx->r13; break;
6891 case 14: u64EffAddr = pCtx->r14; break;
6892 case 15: u64EffAddr = pCtx->r15; break;
6893 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6894 }
6895 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
6896
6897 /* add base */
6898 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
6899 {
6900 case 0: u64EffAddr += pCtx->rax; break;
6901 case 1: u64EffAddr += pCtx->rcx; break;
6902 case 2: u64EffAddr += pCtx->rdx; break;
6903 case 3: u64EffAddr += pCtx->rbx; break;
6904 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
6905 case 6: u64EffAddr += pCtx->rsi; break;
6906 case 7: u64EffAddr += pCtx->rdi; break;
6907 case 8: u64EffAddr += pCtx->r8; break;
6908 case 9: u64EffAddr += pCtx->r9; break;
6909 case 10: u64EffAddr += pCtx->r10; break;
6910 case 11: u64EffAddr += pCtx->r11; break;
6911 case 14: u64EffAddr += pCtx->r14; break;
6912 case 15: u64EffAddr += pCtx->r15; break;
6913 /* complicated encodings */
6914 case 5:
6915 case 13:
6916 if ((bRm & X86_MODRM_MOD_MASK) != 0)
6917 {
6918 if (!pIemCpu->uRexB)
6919 {
6920 u64EffAddr += pCtx->rbp;
6921 SET_SS_DEF();
6922 }
6923 else
6924 u64EffAddr += pCtx->r13;
6925 }
6926 else
6927 {
6928 uint32_t u32Disp;
6929 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6930 u64EffAddr += (int32_t)u32Disp;
6931 }
6932 break;
6933 }
6934 break;
6935 }
6936 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6937 }
6938
6939 /* Get and add the displacement. */
6940 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
6941 {
6942 case 0:
6943 break;
6944 case 1:
6945 {
6946 int8_t i8Disp;
6947 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
6948 u64EffAddr += i8Disp;
6949 break;
6950 }
6951 case 2:
6952 {
6953 uint32_t u32Disp;
6954 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6955 u64EffAddr += (int32_t)u32Disp;
6956 break;
6957 }
6958 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
6959 }
6960
6961 }
6962 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
6963 *pGCPtrEff = u64EffAddr;
6964 else
6965 *pGCPtrEff = u64EffAddr & UINT16_MAX;
6966 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
6967 return VINF_SUCCESS;
6968 }
6969 }
6970
6971 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
6972}
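As a worked example of the 16-bit path above (illustrative only, not part of the file): "mov ax, [bp+10h]" encodes ModRM = 0x46 (mod=01, reg=000, rm=110). mod=01 selects the disp8 form, so IEM_OPCODE_GET_NEXT_S8_SX_U16 fetches 0x10; rm=110 adds BP, so u16EffAddr = BP + 0x10 and SET_SS_DEF() makes SS the default segment (assuming no segment override prefix is present). The function then returns that value in *pGCPtrEff.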
6973
6974/** @} */
6975
6976
6977
6978/*
6979 * Include the instructions
6980 */
6981#include "IEMAllInstructions.cpp.h"
6982
6983
6984
6985
6986#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6987
6988/**
6989 * Sets up execution verification mode.
6990 */
6991static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
6992{
6993 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
6994 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
6995
6996 /*
6997 * Enable verification and/or logging.
6998 */
6999 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
7000 if ( pIemCpu->fNoRem
7001#if 0 /* auto enable on first paged protected mode interrupt */
7002 && pOrgCtx->eflags.Bits.u1IF
7003 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
7004 && TRPMHasTrap(pVCpu)
7005 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
7006#endif
7007#if 0
7008 && pOrgCtx->cs == 0x10
7009 && ( pOrgCtx->rip == 0x90119e3e
7010 || pOrgCtx->rip == 0x901d9810
7011 )
7012#endif
7013#if 0 /* Auto enable DSL - FPU stuff. */
7014 && pOrgCtx->cs == 0x10
7015 && (// pOrgCtx->rip == 0xc02ec07f
7016 //|| pOrgCtx->rip == 0xc02ec082
7017 //|| pOrgCtx->rip == 0xc02ec0c9
7018 0
7019 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */
7020 )
7021#endif
7022#if 1 /* Auto enable DSL - fstp st0 stuff. */
7023 && pOrgCtx->cs == 0x23
7024 && pOrgCtx->rip == 0x804aff7
7025#endif
7026#if 0
7027 && pOrgCtx->rip == 0x9022bb3a
7028#endif
7029#if 0
7030 && 0
7031#endif
7032 )
7033 {
7034 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
7035 RTLogFlags(NULL, "enabled");
7036 pIemCpu->fNoRem = false;
7037 }
7038
7039 /*
7040 * Switch state.
7041 */
7042 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7043 {
7044 static CPUMCTX s_DebugCtx; /* Ugly! */
7045
7046 s_DebugCtx = *pOrgCtx;
7047 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
7048 }
7049
7050 /*
7051 * See if there is an interrupt pending in TRPM and inject it if we can.
7052 */
7053 if ( pOrgCtx->eflags.Bits.u1IF
7054 && TRPMHasTrap(pVCpu)
7055 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
7056 {
7057 uint8_t u8TrapNo;
7058 TRPMEVENT enmType;
7059 RTGCUINT uErrCode;
7060 RTGCPTR uCr2;
7061 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2); AssertRC(rc2);
7062 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
7063 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7064 TRPMResetTrap(pVCpu);
7065 }
7066
7067 /*
7068 * Reset the counters.
7069 */
7070 pIemCpu->cIOReads = 0;
7071 pIemCpu->cIOWrites = 0;
7072 pIemCpu->fUndefinedEFlags = 0;
7073
7074 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7075 {
7076 /*
7077 * Free all verification records.
7078 */
7079 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
7080 pIemCpu->pIemEvtRecHead = NULL;
7081 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
7082 do
7083 {
7084 while (pEvtRec)
7085 {
7086 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
7087 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
7088 pIemCpu->pFreeEvtRec = pEvtRec;
7089 pEvtRec = pNext;
7090 }
7091 pEvtRec = pIemCpu->pOtherEvtRecHead;
7092 pIemCpu->pOtherEvtRecHead = NULL;
7093 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
7094 } while (pEvtRec);
7095 }
7096}
7097
7098
7099/**
7100 * Allocate an event record.
7101 * @returns Pointer to a record.
7102 */
7103static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
7104{
7105 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7106 return NULL;
7107
7108 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
7109 if (pEvtRec)
7110 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
7111 else
7112 {
7113 if (!pIemCpu->ppIemEvtRecNext)
7114 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
7115
7116 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
7117 if (!pEvtRec)
7118 return NULL;
7119 }
7120 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
7121 pEvtRec->pNext = NULL;
7122 return pEvtRec;
7123}
7124
7125
7126/**
7127 * IOMMMIORead notification.
7128 */
7129VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
7130{
7131 PVMCPU pVCpu = VMMGetCpu(pVM);
7132 if (!pVCpu)
7133 return;
7134 PIEMCPU pIemCpu = &pVCpu->iem.s;
7135 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7136 if (!pEvtRec)
7137 return;
7138 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7139 pEvtRec->u.RamRead.GCPhys = GCPhys;
7140 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
7141 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7142 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7143}
7144
7145
7146/**
7147 * IOMMMIOWrite notification.
7148 */
7149VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
7150{
7151 PVMCPU pVCpu = VMMGetCpu(pVM);
7152 if (!pVCpu)
7153 return;
7154 PIEMCPU pIemCpu = &pVCpu->iem.s;
7155 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7156 if (!pEvtRec)
7157 return;
7158 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7159 pEvtRec->u.RamWrite.GCPhys = GCPhys;
7160 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
7161 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
7162 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
7163 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
7164 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
7165 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7166 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7167}
7168
7169
7170/**
7171 * IOMIOPortRead notification.
7172 */
7173VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
7174{
7175 PVMCPU pVCpu = VMMGetCpu(pVM);
7176 if (!pVCpu)
7177 return;
7178 PIEMCPU pIemCpu = &pVCpu->iem.s;
7179 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7180 if (!pEvtRec)
7181 return;
7182 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7183 pEvtRec->u.IOPortRead.Port = Port;
7184 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7185 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7186 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7187}
7188
7189/**
7190 * IOMIOPortWrite notification.
7191 */
7192VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7193{
7194 PVMCPU pVCpu = VMMGetCpu(pVM);
7195 if (!pVCpu)
7196 return;
7197 PIEMCPU pIemCpu = &pVCpu->iem.s;
7198 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7199 if (!pEvtRec)
7200 return;
7201 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7202 pEvtRec->u.IOPortWrite.Port = Port;
7203 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7204 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7205 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7206 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7207}
7208
7209
7210VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
7211{
7212 AssertFailed();
7213}
7214
7215
7216VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
7217{
7218 AssertFailed();
7219}
7220
7221
7222/**
7223 * Fakes and records an I/O port read.
7224 *
7225 * @returns VINF_SUCCESS.
7226 * @param pIemCpu The IEM per CPU data.
7227 * @param Port The I/O port.
7228 * @param pu32Value Where to store the fake value.
7229 * @param cbValue The size of the access.
7230 */
7231static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
7232{
7233 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7234 if (pEvtRec)
7235 {
7236 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7237 pEvtRec->u.IOPortRead.Port = Port;
7238 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7239 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7240 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7241 }
7242 pIemCpu->cIOReads++;
7243 *pu32Value = 0xcccccccc;
7244 return VINF_SUCCESS;
7245}
7246
7247
7248/**
7249 * Fakes and records an I/O port write.
7250 *
7251 * @returns VINF_SUCCESS.
7252 * @param pIemCpu The IEM per CPU data.
7253 * @param Port The I/O port.
7254 * @param u32Value The value being written.
7255 * @param cbValue The size of the access.
7256 */
7257static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7258{
7259 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7260 if (pEvtRec)
7261 {
7262 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7263 pEvtRec->u.IOPortWrite.Port = Port;
7264 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7265 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7266 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7267 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7268 }
7269 pIemCpu->cIOWrites++;
7270 return VINF_SUCCESS;
7271}
7272
7273
7274/**
7275 * Used to add extra details about a stub case.
7276 * @param pIemCpu The IEM per CPU state.
7277 */
7278static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
7279{
7280 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7281 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7282 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7283 char szRegs[4096];
7284 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
7285 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
7286 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
7287 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
7288 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
7289 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
7290 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
7291 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
7292 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
7293 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
7294 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
7295 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
7296 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
7297 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
7298 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
7299 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
7300 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
7301 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
7302 " efer=%016VR{efer}\n"
7303 " pat=%016VR{pat}\n"
7304 " sf_mask=%016VR{sf_mask}\n"
7305 "krnl_gs_base=%016VR{krnl_gs_base}\n"
7306 " lstar=%016VR{lstar}\n"
7307 " star=%016VR{star} cstar=%016VR{cstar}\n"
7308 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
7309 );
7310
7311 char szInstr1[256];
7312 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip - pIemCpu->offOpcode,
7313 DBGF_DISAS_FLAGS_DEFAULT_MODE,
7314 szInstr1, sizeof(szInstr1), NULL);
7315 char szInstr2[256];
7316 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
7317 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
7318 szInstr2, sizeof(szInstr2), NULL);
7319
7320 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
7321}
7322
7323
7324/**
7325 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
7326 * dump to the assertion info.
7327 *
7328 * @param pEvtRec The record to dump.
7329 */
7330static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
7331{
7332 switch (pEvtRec->enmEvent)
7333 {
7334 case IEMVERIFYEVENT_IOPORT_READ:
7335 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
7336 pEvtRec->u.IOPortWrite.Port,
7337 pEvtRec->u.IOPortWrite.cbValue);
7338 break;
7339 case IEMVERIFYEVENT_IOPORT_WRITE:
7340 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
7341 pEvtRec->u.IOPortWrite.Port,
7342 pEvtRec->u.IOPortWrite.cbValue,
7343 pEvtRec->u.IOPortWrite.u32Value);
7344 break;
7345 case IEMVERIFYEVENT_RAM_READ:
7346 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
7347 pEvtRec->u.RamRead.GCPhys,
7348 pEvtRec->u.RamRead.cb);
7349 break;
7350 case IEMVERIFYEVENT_RAM_WRITE:
7351 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
7352 pEvtRec->u.RamWrite.GCPhys,
7353 pEvtRec->u.RamWrite.cb,
7354 (int)pEvtRec->u.RamWrite.cb,
7355 pEvtRec->u.RamWrite.ab);
7356 break;
7357 default:
7358 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
7359 break;
7360 }
7361}
7362
7363
7364/**
7365 * Raises an assertion on the specified record, showing the given message with
7366 * a record dump attached.
7367 *
7368 * @param pIemCpu The IEM per CPU data.
7369 * @param pEvtRec1 The first record.
7370 * @param pEvtRec2 The second record.
7371 * @param pszMsg The message explaining why we're asserting.
7372 */
7373static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
7374{
7375 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7376 iemVerifyAssertAddRecordDump(pEvtRec1);
7377 iemVerifyAssertAddRecordDump(pEvtRec2);
7378 iemVerifyAssertMsg2(pIemCpu);
7379 RTAssertPanic();
7380}
7381
7382
7383/**
7384 * Raises an assertion on the specified record, showing the given message with
7385 * a record dump attached.
7386 *
7387 * @param pIemCpu The IEM per CPU data.
7388 * @param pEvtRec The record to dump.
7389 * @param pszMsg The message explaining why we're asserting.
7390 */
7391static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
7392{
7393 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7394 iemVerifyAssertAddRecordDump(pEvtRec);
7395 iemVerifyAssertMsg2(pIemCpu);
7396 RTAssertPanic();
7397}
7398
7399
7400/**
7401 * Verifies a write record.
7402 *
7403 * @param pIemCpu The IEM per CPU data.
7404 * @param pEvtRec The write record.
7405 */
7406static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
7407{
7408 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
7409 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
7410 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
7411 if ( RT_FAILURE(rc)
7412 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
7413 {
7414 /* fend off ins */
7415 if ( !pIemCpu->cIOReads
7416 || pEvtRec->u.RamWrite.ab[0] != 0xcc
7417 || ( pEvtRec->u.RamWrite.cb != 1
7418 && pEvtRec->u.RamWrite.cb != 2
7419 && pEvtRec->u.RamWrite.cb != 4) )
7420 {
7421 /* fend off ROMs */
7422 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
7423 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
7424 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
7425 {
7426 /* fend off fxsave */
7427 if (pEvtRec->u.RamWrite.cb != 512)
7428 {
7429 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7430 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
7431 RTAssertMsg2Add("REM: %.*Rhxs\n"
7432 "IEM: %.*Rhxs\n",
7433 pEvtRec->u.RamWrite.cb, abBuf,
7434 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
7435 iemVerifyAssertAddRecordDump(pEvtRec);
7436 iemVerifyAssertMsg2(pIemCpu);
7437 RTAssertPanic();
7438 }
7439 }
7440 }
7441 }
7442
7443}
7444
7445/**
7446 * Performs the post-execution verification checks.
7447 */
7448static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
7449{
7450 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7451 return;
7452
7453 /*
7454 * Switch back the state.
7455 */
7456 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
7457 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
7458 Assert(pOrgCtx != pDebugCtx);
7459 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
7460
7461 /*
7462 * Execute the instruction in REM.
7463 */
7464 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7465 EMRemLock(pVM);
7466 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
7467 AssertRC(rc);
7468 EMRemUnlock(pVM);
7469
7470 /*
7471 * Compare the register states.
7472 */
7473 unsigned cDiffs = 0;
7474 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
7475 {
7476 Log(("REM and IEM ends up with different registers!\n"));
7477
7478# define CHECK_FIELD(a_Field) \
7479 do \
7480 { \
7481 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
7482 { \
7483 switch (sizeof(pOrgCtx->a_Field)) \
7484 { \
7485 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7486 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7487 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7488 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7489 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
7490 } \
7491 cDiffs++; \
7492 } \
7493 } while (0)
7494
7495# define CHECK_BIT_FIELD(a_Field) \
7496 do \
7497 { \
7498 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
7499 { \
7500 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
7501 cDiffs++; \
7502 } \
7503 } while (0)
7504
7505# define CHECK_SEL(a_Sel) \
7506 do \
7507 { \
7508 CHECK_FIELD(a_Sel); \
7509 if ( pOrgCtx->a_Sel##Hid.Attr.u != pDebugCtx->a_Sel##Hid.Attr.u \
7510 && (pOrgCtx->a_Sel##Hid.Attr.u | X86_SEL_TYPE_ACCESSED) != pDebugCtx->a_Sel##Hid.Attr.u) \
7511 { \
7512 RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
7513 cDiffs++; \
7514 } \
7515 CHECK_FIELD(a_Sel##Hid.u64Base); \
7516 CHECK_FIELD(a_Sel##Hid.u32Limit); \
7517 } while (0)
7518
7519#if 1 /* The recompiler doesn't update these the intel way. */
7520 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
7521 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
7522 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
7523 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
7524 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
7525 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
7526 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
7527 pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
7528 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
7529 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
7530#endif
7531 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
7532 {
7533 RTAssertMsg2Weak(" the FPU state differs\n");
7534 cDiffs++;
7535 CHECK_FIELD(fpu.FCW);
7536 CHECK_FIELD(fpu.FSW);
7537 CHECK_FIELD(fpu.FTW);
7538 CHECK_FIELD(fpu.FOP);
7539 CHECK_FIELD(fpu.FPUIP);
7540 CHECK_FIELD(fpu.CS);
7541 CHECK_FIELD(fpu.Rsrvd1);
7542 CHECK_FIELD(fpu.FPUDP);
7543 CHECK_FIELD(fpu.DS);
7544 CHECK_FIELD(fpu.Rsrvd2);
7545 CHECK_FIELD(fpu.MXCSR);
7546 CHECK_FIELD(fpu.MXCSR_MASK);
7547 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
7548 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
7549 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
7550 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
7551 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
7552 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
7553 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
7554 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
7555 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
7556 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
7557 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
7558 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
7559 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
7560 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
7561 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
7562 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
7563 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
7564 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
7565 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
7566 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
7567 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
7568 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
7569 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
7570 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
7571 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
7572 CHECK_FIELD(fpu.au32RsrvdRest[i]);
7573 }
7574 CHECK_FIELD(rip);
7575 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
7576 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
7577 {
7578 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
7579 CHECK_BIT_FIELD(rflags.Bits.u1CF);
7580 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
7581 CHECK_BIT_FIELD(rflags.Bits.u1PF);
7582 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
7583 CHECK_BIT_FIELD(rflags.Bits.u1AF);
7584 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
7585 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
7586 CHECK_BIT_FIELD(rflags.Bits.u1SF);
7587 CHECK_BIT_FIELD(rflags.Bits.u1TF);
7588 CHECK_BIT_FIELD(rflags.Bits.u1IF);
7589 CHECK_BIT_FIELD(rflags.Bits.u1DF);
7590 CHECK_BIT_FIELD(rflags.Bits.u1OF);
7591 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
7592 CHECK_BIT_FIELD(rflags.Bits.u1NT);
7593 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
7594 CHECK_BIT_FIELD(rflags.Bits.u1RF);
7595 CHECK_BIT_FIELD(rflags.Bits.u1VM);
7596 CHECK_BIT_FIELD(rflags.Bits.u1AC);
7597 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
7598 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
7599 CHECK_BIT_FIELD(rflags.Bits.u1ID);
7600 }
7601
7602 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
7603 CHECK_FIELD(rax);
7604 CHECK_FIELD(rcx);
7605 if (!pIemCpu->fIgnoreRaxRdx)
7606 CHECK_FIELD(rdx);
7607 CHECK_FIELD(rbx);
7608 CHECK_FIELD(rsp);
7609 CHECK_FIELD(rbp);
7610 CHECK_FIELD(rsi);
7611 CHECK_FIELD(rdi);
7612 CHECK_FIELD(r8);
7613 CHECK_FIELD(r9);
7614 CHECK_FIELD(r10);
7615 CHECK_FIELD(r11);
7616 CHECK_FIELD(r12);
7617 CHECK_FIELD(r13);
7618 CHECK_SEL(cs);
7619 CHECK_SEL(ss);
7620 CHECK_SEL(ds);
7621 CHECK_SEL(es);
7622 CHECK_SEL(fs);
7623 CHECK_SEL(gs);
7624 CHECK_FIELD(cr0);
7625 CHECK_FIELD(cr2);
7626 CHECK_FIELD(cr3);
7627 CHECK_FIELD(cr4);
7628 CHECK_FIELD(dr[0]);
7629 CHECK_FIELD(dr[1]);
7630 CHECK_FIELD(dr[2]);
7631 CHECK_FIELD(dr[3]);
7632 CHECK_FIELD(dr[6]);
7633 if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
7634 CHECK_FIELD(dr[7]);
7635 CHECK_FIELD(gdtr.cbGdt);
7636 CHECK_FIELD(gdtr.pGdt);
7637 CHECK_FIELD(idtr.cbIdt);
7638 CHECK_FIELD(idtr.pIdt);
7639 CHECK_FIELD(ldtr);
7640 CHECK_FIELD(ldtrHid.u64Base);
7641 CHECK_FIELD(ldtrHid.u32Limit);
7642 CHECK_FIELD(ldtrHid.Attr.u);
7643 CHECK_FIELD(tr);
7644 CHECK_FIELD(trHid.u64Base);
7645 CHECK_FIELD(trHid.u32Limit);
7646 CHECK_FIELD(trHid.Attr.u);
7647 CHECK_FIELD(SysEnter.cs);
7648 CHECK_FIELD(SysEnter.eip);
7649 CHECK_FIELD(SysEnter.esp);
7650 CHECK_FIELD(msrEFER);
7651 CHECK_FIELD(msrSTAR);
7652 CHECK_FIELD(msrPAT);
7653 CHECK_FIELD(msrLSTAR);
7654 CHECK_FIELD(msrCSTAR);
7655 CHECK_FIELD(msrSFMASK);
7656 CHECK_FIELD(msrKERNELGSBASE);
7657
7658 if (cDiffs != 0)
7659 {
7660 if (LogIs3Enabled())
7661 DBGFR3Info(pVM, "cpumguest", "verbose", NULL);
7662 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
7663 iemVerifyAssertMsg2(pIemCpu);
7664 RTAssertPanic();
7665 }
7666# undef CHECK_FIELD
7667# undef CHECK_BIT_FIELD
7668 }
7669
7670 /*
7671 * If the register state compared fine, check the verification event
7672 * records.
7673 */
7674 if (cDiffs == 0)
7675 {
7676 /*
7677 * Compare verification event records.
7678 * - I/O port accesses should be a 1:1 match.
7679 */
7680 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
7681 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
7682 while (pIemRec && pOtherRec)
7683 {
7684 /* Since we might miss RAM writes and reads, ignore reads and verify
7685 any extra IEM-only write records against guest memory. */
7686 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
7687 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
7688 && pIemRec->pNext)
7689 {
7690 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
7691 iemVerifyWriteRecord(pIemCpu, pIemRec);
7692 pIemRec = pIemRec->pNext;
7693 }
7694
7695 /* Do the compare. */
7696 if (pIemRec->enmEvent != pOtherRec->enmEvent)
7697 {
7698 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
7699 break;
7700 }
7701 bool fEquals;
7702 switch (pIemRec->enmEvent)
7703 {
7704 case IEMVERIFYEVENT_IOPORT_READ:
7705 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
7706 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
7707 break;
7708 case IEMVERIFYEVENT_IOPORT_WRITE:
7709 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
7710 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
7711 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
7712 break;
7713 case IEMVERIFYEVENT_RAM_READ:
7714 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
7715 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
7716 break;
7717 case IEMVERIFYEVENT_RAM_WRITE:
7718 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
7719 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
7720 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
7721 break;
7722 default:
7723 fEquals = false;
7724 break;
7725 }
7726 if (!fEquals)
7727 {
7728 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
7729 break;
7730 }
7731
7732 /* advance */
7733 pIemRec = pIemRec->pNext;
7734 pOtherRec = pOtherRec->pNext;
7735 }
7736
7737 /* Ignore extra writes and reads. */
7738 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
7739 {
7740 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
7741 iemVerifyWriteRecord(pIemCpu, pIemRec);
7742 pIemRec = pIemRec->pNext;
7743 }
7744 if (pIemRec != NULL)
7745 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
7746 else if (pOtherRec != NULL)
7747 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
7748 }
7749 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
7750
7751#if 0
7752 /*
7753 * HACK ALERT! You don't normally want to verify a whole boot sequence.
7754 */
7755 if (pIemCpu->cInstructions == 1)
7756 RTLogFlags(NULL, "disabled");
7757#endif
7758}
7759
7760#else /* !IEM_VERIFICATION_MODE || !IN_RING3 */
7761
7762/* stubs */
7763static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
7764{
7765 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
7766 return VERR_INTERNAL_ERROR;
7767}
7768
7769static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7770{
7771 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
7772 return VERR_INTERNAL_ERROR;
7773}
7774
7775#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */
7776
7777
7778/**
7779 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
7780 * IEMExecOneWithPrefetchedByPC.
7781 *
7782 * @return Strict VBox status code.
7783 * @param pVCpu The current virtual CPU.
7784 * @param pIemCpu The IEM per CPU data.
7785 */
7786DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu)
7787{
7788 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
7789 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
7790 if (rcStrict == VINF_SUCCESS)
7791 pIemCpu->cInstructions++;
7792//#ifdef DEBUG
7793// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
7794//#endif
7795
7796 /* Execute the next instruction as well if a cli, pop ss or
7797 mov ss, Gr has just completed successfully. */
7798 if ( rcStrict == VINF_SUCCESS
7799 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
7800 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
7801 {
7802 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
7803 if (rcStrict == VINF_SUCCESS)
7804 {
7805 IEM_OPCODE_GET_NEXT_U8(&b);
7806 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
7807 if (rcStrict == VINF_SUCCESS)
7808 pIemCpu->cInstructions++;
7809 }
7810 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
7811 }
7812
7813 return rcStrict;
7814}
7815
7816
7817/**
7818 * Execute one instruction.
7819 *
7820 * @return Strict VBox status code.
7821 * @param pVCpu The current virtual CPU.
7822 */
7823VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
7824{
7825 PIEMCPU pIemCpu = &pVCpu->iem.s;
7826
7827#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
7828 iemExecVerificationModeSetup(pIemCpu);
7829#endif
7830#ifdef LOG_ENABLED
7831 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7832# ifdef IN_RING3
7833 if (LogIs2Enabled())
7834 {
7835 char szInstr[256];
7836 uint32_t cbInstr = 0;
7837 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
7838 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
7839 szInstr, sizeof(szInstr), &cbInstr);
7840
7841 Log3(("**** "
7842 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
7843 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
7844 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
7845 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
7846 " %s\n"
7847 ,
7848 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
7849 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
7850 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
7851 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
7852 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
7853 szInstr));
7854
7855 if (LogIs3Enabled())
7856 DBGFR3Info(pVCpu->pVMR3, "cpumguest", "verbose", NULL);
7857 }
7858 else
7859# endif
7860 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
7861 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
7862#endif
7863
7864 /*
7865 * Do the decoding and emulation.
7866 */
7867 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
7868 if (rcStrict == VINF_SUCCESS)
7869 rcStrict = iemExecOneInner(pVCpu, pIemCpu);
7870
7871#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
7872 /*
7873 * Assert some sanity.
7874 */
7875 iemExecVerificationModeCheck(pIemCpu);
7876#endif
7877 if (rcStrict != VINF_SUCCESS)
7878 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
7879 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
7880 return rcStrict;
7881}
7882
7883
7884VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
7885{
7886 PIEMCPU pIemCpu = &pVCpu->iem.s;
7887 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
7888 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
7889
7890 iemInitDecoder(pIemCpu);
7891 uint32_t const cbOldWritten = pIemCpu->cbWritten;
7892
7893 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
7894 if (rcStrict == VINF_SUCCESS)
7895 {
7896 rcStrict = iemExecOneInner(pVCpu, pIemCpu);
7897 if (pcbWritten)
7898 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
7899 }
7900 return rcStrict;
7901}
7902
7903
7904VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
7905 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
7906{
7907 PIEMCPU pIemCpu = &pVCpu->iem.s;
7908 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
7909 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
7910
7911 VBOXSTRICTRC rcStrict;
7912 if ( cbOpcodeBytes
7913 && pCtx->rip == OpcodeBytesPC)
7914 {
7915 iemInitDecoder(pIemCpu);
7916 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
7917 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
7918 rcStrict = VINF_SUCCESS;
7919 }
7920 else
7921 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
7922 if (rcStrict == VINF_SUCCESS)
7923 {
7924 rcStrict = iemExecOneInner(pVCpu, pIemCpu);
7925 }
7926 return rcStrict;
7927}
7928
7929
7930VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
7931{
7932 return IEMExecOne(pVCpu);
7933}
7934
7935
7936
7937/**
7938 * Injects a trap, fault, abort, software interrupt or external interrupt.
7939 *
7940 * The parameter list matches TRPMQueryTrapAll pretty closely.
7941 *
7942 * @returns Strict VBox status code.
7943 * @param pVCpu The current virtual CPU.
7944 * @param u8TrapNo The trap number.
7945 * @param enmType What type is it (trap/fault/abort), software
7946 * interrupt or hardware interrupt.
7947 * @param uErrCode The error code if applicable.
7948 * @param uCr2 The CR2 value if applicable.
7949 */
7950VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
7951{
7952 iemInitDecoder(&pVCpu->iem.s);
7953
7954 uint32_t fFlags;
7955 switch (enmType)
7956 {
7957 case TRPM_HARDWARE_INT:
7958 LogFlow(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
7959 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
7960 uErrCode = uCr2 = 0;
7961 break;
7962
7963 case TRPM_SOFTWARE_INT:
7964 LogFlow(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
7965 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
7966 uErrCode = uCr2 = 0;
7967 break;
7968
7969 case TRPM_TRAP:
7970 LogFlow(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
7971 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
7972 if (u8TrapNo == X86_XCPT_PF)
7973 fFlags |= IEM_XCPT_FLAGS_CR2;
7974 switch (u8TrapNo)
7975 {
7976 case X86_XCPT_DF:
7977 case X86_XCPT_TS:
7978 case X86_XCPT_NP:
7979 case X86_XCPT_SS:
7980 case X86_XCPT_PF:
7981 case X86_XCPT_AC:
7982 fFlags |= IEM_XCPT_FLAGS_ERR;
7983 break;
7984 }
7985 break;
7986
7987 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7988 }
7989
7990 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
7991}
7992
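A minimal usage sketch (not part of this file), mirroring the TRPM hand-off done in iemExecVerificationModeSetup() above: a ring-3 caller with a pending TRPM event can forward it to IEM like this.

    /* Query the pending event from TRPM and let IEM deliver it. */
    uint8_t      u8TrapNo;
    TRPMEVENT    enmType;
    RTGCUINT     uErrCode;
    RTGCPTR      uCr2;
    int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2);
    AssertRC(rc2);
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
    if (rcStrict == VINF_SUCCESS)
        TRPMResetTrap(pVCpu);   /* the event has been taken over by IEM */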
7993
7994VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
7995{
7996 return VERR_NOT_IMPLEMENTED;
7997}
7998
7999
8000VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
8001{
8002 return VERR_NOT_IMPLEMENTED;
8003}
8004
8005
8006#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
8007/**
8008 * Executes a IRET instruction with default operand size.
8009 *
8010 * This is for PATM.
8011 *
8012 * @returns VBox status code.
8013 * @param pVCpu The current virtual CPU.
8014 * @param pCtxCore The register frame.
8015 */
8016VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
8017{
8018 PIEMCPU pIemCpu = &pVCpu->iem.s;
8019 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8020
8021 iemCtxCoreToCtx(pCtx, pCtxCore);
8022 iemInitDecoder(pIemCpu);
8023 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
8024 if (rcStrict == VINF_SUCCESS)
8025 iemCtxToCtxCore(pCtxCore, pCtx);
8026 else
8027 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8028 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8029 return rcStrict;
8030}
8031#endif
8032