VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@42676

Last change on this file since 42676 was 42676, checked in by vboxsync on 2012-08-08

IEM: IRET to V8086 (had to try). Set CPUM_CHANGED_FPU_REM. Added missing RIP advancing to fincstp and fdecstp.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 306.4 KB
1/* $Id: IEMAll.cpp 42676 2012-08-08 09:23:50Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought to
36 * conflict with the speed goal, as the disassembler chews on things a bit too much
37 * and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
73/*******************************************************************************
74* Header Files *
75*******************************************************************************/
76#define LOG_GROUP LOG_GROUP_IEM
77#include <VBox/vmm/iem.h>
78#include <VBox/vmm/cpum.h>
79#include <VBox/vmm/pgm.h>
80#include <internal/pgm.h>
81#include <VBox/vmm/iom.h>
82#include <VBox/vmm/em.h>
83#include <VBox/vmm/tm.h>
84#include <VBox/vmm/dbgf.h>
85#ifdef VBOX_WITH_RAW_MODE_NOT_R0
86# include <VBox/vmm/patm.h>
87#endif
88#ifdef IEM_VERIFICATION_MODE
89# include <VBox/vmm/rem.h>
90# include <VBox/vmm/mm.h>
91#endif
92#include "IEMInternal.h"
93#include <VBox/vmm/vm.h>
94#include <VBox/log.h>
95#include <VBox/err.h>
96#include <VBox/param.h>
97#include <iprt/assert.h>
98#include <iprt/string.h>
99#include <iprt/x86.h>
100
101
102/*******************************************************************************
103* Structures and Typedefs *
104*******************************************************************************/
105/** @typedef PFNIEMOP
106 * Pointer to an opcode decoder function.
107 */
108
109/** @def FNIEMOP_DEF
110 * Define an opcode decoder function.
111 *
112 * We're using macros for this so that adding and removing parameters as well as
113 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
114 *
115 * @param a_Name The function name.
116 */
117
118
119#if defined(__GNUC__) && defined(RT_ARCH_X86)
120typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
121# define FNIEMOP_DEF(a_Name) \
122 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
123# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
124 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
125# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
126 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
127
128#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
129typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
130# define FNIEMOP_DEF(a_Name) \
131 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
132# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
133 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
134# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
135 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
136
137#elif defined(__GNUC__)
138typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
139# define FNIEMOP_DEF(a_Name) \
140 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
141# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
142 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
143# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
144 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
145
146#else
147typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
148# define FNIEMOP_DEF(a_Name) \
149 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
150# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
151 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
152# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
153 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
154
155#endif
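
/*
 * Illustrative sketch of how these macros are meant to be used; the function
 * name iemOp_example_nop is made up for illustration and the real decoder
 * bodies live elsewhere (see g_apfnOneByteMap further down).
 */
#if 0 /* example only, not built */
FNIEMOP_DEF(iemOp_example_nop)
{
    /* A decoder typically fetches any remaining opcode/operand bytes here and
       then either defers to a C implementation or advances RIP itself. */
    return VINF_SUCCESS;
}

/* Dispatching through the one-byte opcode map: */
/* VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]); */
#endif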
156
157
158/**
159 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
160 */
161typedef union IEMSELDESC
162{
163 /** The legacy view. */
164 X86DESC Legacy;
165 /** The long mode view. */
166 X86DESC64 Long;
167} IEMSELDESC;
168/** Pointer to a selector descriptor table entry. */
169typedef IEMSELDESC *PIEMSELDESC;
170
171
172/*******************************************************************************
173* Defined Constants And Macros *
174*******************************************************************************/
175/** @name IEM status codes.
176 *
177 * Not quite sure how this will play out in the end, just aliasing safe status
178 * codes for now.
179 *
180 * @{ */
181#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
182/** @} */
183
184/** Temporary hack to disable the double execution. Will be removed in favor
185 * of a dedicated execution mode in EM. */
186//#define IEM_VERIFICATION_MODE_NO_REM
187
188/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
189 * due to GCC lacking knowledge about the value range of a switch. */
190#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
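
/*
 * Illustrative sketch: the macro above is meant as the default case of a fully
 * enumerated switch, so GCC stops warning without adding a reachable code
 * path. The switch below is made up for illustration.
 */
#if 0 /* example only, not built */
switch (pIemCpu->enmEffOpSize)
{
    case IEMMODE_16BIT: /* ... */ break;
    case IEMMODE_32BIT: /* ... */ break;
    case IEMMODE_64BIT: /* ... */ break;
    IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
#endif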
191
192/**
193 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
194 * occasion.
195 */
196#ifdef LOG_ENABLED
197# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
198 do { \
199 Log(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
200 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
201 } while (0)
202#else
203# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
204 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
205#endif
206
207/**
208 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
209 * occasion using the supplied logger statement.
210 *
211 * @param a_LoggerArgs What to log on failure.
212 */
213#ifdef LOG_ENABLED
214# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
215 do { \
216 LogFunc(a_LoggerArgs); \
217 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
218 } while (0)
219#else
220# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
221 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
222#endif
223
224/**
225 * Call an opcode decoder function.
226 *
227 * We're using macros for this so that adding and removing parameters can be
228 * done as we please. See FNIEMOP_DEF.
229 */
230#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
231
232/**
233 * Call a common opcode decoder function taking one extra argument.
234 *
235 * We're using macros for this so that adding and removing parameters can be
236 * done as we please. See FNIEMOP_DEF_1.
237 */
238#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
239
240/**
241 * Call a common opcode decoder function taking two extra arguments.
242 *
243 * We're using macros for this so that adding and removing parameters can be
244 * done as we please. See FNIEMOP_DEF_2.
245 */
246#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
247
248/**
249 * Check if we're currently executing in real or virtual 8086 mode.
250 *
251 * @returns @c true if it is, @c false if not.
252 * @param a_pIemCpu The IEM state of the current CPU.
253 */
254#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
255
256/**
257 * Check if we're currently executing in long mode.
258 *
259 * @returns @c true if it is, @c false if not.
260 * @param a_pIemCpu The IEM state of the current CPU.
261 */
262#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
263
264/**
265 * Check if we're currently executing in real mode.
266 *
267 * @returns @c true if it is, @c false if not.
268 * @param a_pIemCpu The IEM state of the current CPU.
269 */
270#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
271
272/**
273 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
274 */
275#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
276
277/**
278 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
279 */
280#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
281
282/**
283 * Tests if at least one of the specified AMD CPUID features (extended) is
284 * marked present.
285 */
286#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
287
288/**
289 * Checks if an Intel CPUID feature is present.
290 */
291#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
292 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
293 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
294
295/**
296 * Evaluates to true if we're presenting an Intel CPU to the guest.
297 */
298#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) (true) /** @todo determine this once and store it in the CPU structure */
299
300/**
301 * Evaluates to true if we're presenting an AMD CPU to the guest.
302 */
303#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) (false) /** @todo determine this once and store it in the CPU structure */
304
305/**
306 * Check if the address is canonical.
307 */
308#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
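
/*
 * Worked example for the check above: a canonical 48-bit address has bits
 * 63:47 all equal to bit 47.  Adding 2^47 folds the upper canonical half onto
 * the lower one, so a single unsigned compare against 2^48 suffices:
 *    0x00007FFFFFFFFFFF + 2^47 = 0x0000FFFFFFFFFFFF   (< 2^48, canonical)
 *    0xFFFF800000000000 + 2^47 = 0                    (wraps, < 2^48, canonical)
 *    0x0000800000000000 + 2^47 = 0x0001000000000000   (== 2^48, not canonical)
 */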
309
310
311/*******************************************************************************
312* Global Variables *
313*******************************************************************************/
314extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
315
316
317/** Function table for the ADD instruction. */
318static const IEMOPBINSIZES g_iemAImpl_add =
319{
320 iemAImpl_add_u8, iemAImpl_add_u8_locked,
321 iemAImpl_add_u16, iemAImpl_add_u16_locked,
322 iemAImpl_add_u32, iemAImpl_add_u32_locked,
323 iemAImpl_add_u64, iemAImpl_add_u64_locked
324};
325
326/** Function table for the ADC instruction. */
327static const IEMOPBINSIZES g_iemAImpl_adc =
328{
329 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
330 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
331 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
332 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
333};
334
335/** Function table for the SUB instruction. */
336static const IEMOPBINSIZES g_iemAImpl_sub =
337{
338 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
339 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
340 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
341 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
342};
343
344/** Function table for the SBB instruction. */
345static const IEMOPBINSIZES g_iemAImpl_sbb =
346{
347 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
348 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
349 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
350 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
351};
352
353/** Function table for the OR instruction. */
354static const IEMOPBINSIZES g_iemAImpl_or =
355{
356 iemAImpl_or_u8, iemAImpl_or_u8_locked,
357 iemAImpl_or_u16, iemAImpl_or_u16_locked,
358 iemAImpl_or_u32, iemAImpl_or_u32_locked,
359 iemAImpl_or_u64, iemAImpl_or_u64_locked
360};
361
362/** Function table for the XOR instruction. */
363static const IEMOPBINSIZES g_iemAImpl_xor =
364{
365 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
366 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
367 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
368 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
369};
370
371/** Function table for the AND instruction. */
372static const IEMOPBINSIZES g_iemAImpl_and =
373{
374 iemAImpl_and_u8, iemAImpl_and_u8_locked,
375 iemAImpl_and_u16, iemAImpl_and_u16_locked,
376 iemAImpl_and_u32, iemAImpl_and_u32_locked,
377 iemAImpl_and_u64, iemAImpl_and_u64_locked
378};
379
380/** Function table for the CMP instruction.
381 * @remarks Making operand order ASSUMPTIONS.
382 */
383static const IEMOPBINSIZES g_iemAImpl_cmp =
384{
385 iemAImpl_cmp_u8, NULL,
386 iemAImpl_cmp_u16, NULL,
387 iemAImpl_cmp_u32, NULL,
388 iemAImpl_cmp_u64, NULL
389};
390
391/** Function table for the TEST instruction.
392 * @remarks Making operand order ASSUMPTIONS.
393 */
394static const IEMOPBINSIZES g_iemAImpl_test =
395{
396 iemAImpl_test_u8, NULL,
397 iemAImpl_test_u16, NULL,
398 iemAImpl_test_u32, NULL,
399 iemAImpl_test_u64, NULL
400};
401
402/** Function table for the BT instruction. */
403static const IEMOPBINSIZES g_iemAImpl_bt =
404{
405 NULL, NULL,
406 iemAImpl_bt_u16, NULL,
407 iemAImpl_bt_u32, NULL,
408 iemAImpl_bt_u64, NULL
409};
410
411/** Function table for the BTC instruction. */
412static const IEMOPBINSIZES g_iemAImpl_btc =
413{
414 NULL, NULL,
415 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
416 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
417 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
418};
419
420/** Function table for the BTR instruction. */
421static const IEMOPBINSIZES g_iemAImpl_btr =
422{
423 NULL, NULL,
424 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
425 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
426 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
427};
428
429/** Function table for the BTS instruction. */
430static const IEMOPBINSIZES g_iemAImpl_bts =
431{
432 NULL, NULL,
433 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
434 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
435 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
436};
437
438/** Function table for the BSF instruction. */
439static const IEMOPBINSIZES g_iemAImpl_bsf =
440{
441 NULL, NULL,
442 iemAImpl_bsf_u16, NULL,
443 iemAImpl_bsf_u32, NULL,
444 iemAImpl_bsf_u64, NULL
445};
446
447/** Function table for the BSR instruction. */
448static const IEMOPBINSIZES g_iemAImpl_bsr =
449{
450 NULL, NULL,
451 iemAImpl_bsr_u16, NULL,
452 iemAImpl_bsr_u32, NULL,
453 iemAImpl_bsr_u64, NULL
454};
455
456/** Function table for the IMUL instruction. */
457static const IEMOPBINSIZES g_iemAImpl_imul_two =
458{
459 NULL, NULL,
460 iemAImpl_imul_two_u16, NULL,
461 iemAImpl_imul_two_u32, NULL,
462 iemAImpl_imul_two_u64, NULL
463};
464
465/** Group 1 /r lookup table. */
466static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
467{
468 &g_iemAImpl_add,
469 &g_iemAImpl_or,
470 &g_iemAImpl_adc,
471 &g_iemAImpl_sbb,
472 &g_iemAImpl_and,
473 &g_iemAImpl_sub,
474 &g_iemAImpl_xor,
475 &g_iemAImpl_cmp
476};
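
/*
 * Illustrative sketch: the table above is indexed by the ModR/M reg field
 * (bits 5:3) of the group 1 opcodes 0x80..0x83, i.e. /0=ADD, /1=OR, /2=ADC,
 * /3=SBB, /4=AND, /5=SUB, /6=XOR, /7=CMP.
 */
#if 0 /* example only, not built */
uint8_t bRm;
IEM_OPCODE_GET_NEXT_U8(&bRm);                            /* the ModR/M byte (macro defined further down) */
PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7]; /* reg field selects the operation */
#endif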
477
478/** Function table for the INC instruction. */
479static const IEMOPUNARYSIZES g_iemAImpl_inc =
480{
481 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
482 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
483 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
484 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
485};
486
487/** Function table for the DEC instruction. */
488static const IEMOPUNARYSIZES g_iemAImpl_dec =
489{
490 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
491 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
492 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
493 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
494};
495
496/** Function table for the NEG instruction. */
497static const IEMOPUNARYSIZES g_iemAImpl_neg =
498{
499 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
500 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
501 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
502 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
503};
504
505/** Function table for the NOT instruction. */
506static const IEMOPUNARYSIZES g_iemAImpl_not =
507{
508 iemAImpl_not_u8, iemAImpl_not_u8_locked,
509 iemAImpl_not_u16, iemAImpl_not_u16_locked,
510 iemAImpl_not_u32, iemAImpl_not_u32_locked,
511 iemAImpl_not_u64, iemAImpl_not_u64_locked
512};
513
514
515/** Function table for the ROL instruction. */
516static const IEMOPSHIFTSIZES g_iemAImpl_rol =
517{
518 iemAImpl_rol_u8,
519 iemAImpl_rol_u16,
520 iemAImpl_rol_u32,
521 iemAImpl_rol_u64
522};
523
524/** Function table for the ROR instruction. */
525static const IEMOPSHIFTSIZES g_iemAImpl_ror =
526{
527 iemAImpl_ror_u8,
528 iemAImpl_ror_u16,
529 iemAImpl_ror_u32,
530 iemAImpl_ror_u64
531};
532
533/** Function table for the RCL instruction. */
534static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
535{
536 iemAImpl_rcl_u8,
537 iemAImpl_rcl_u16,
538 iemAImpl_rcl_u32,
539 iemAImpl_rcl_u64
540};
541
542/** Function table for the RCR instruction. */
543static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
544{
545 iemAImpl_rcr_u8,
546 iemAImpl_rcr_u16,
547 iemAImpl_rcr_u32,
548 iemAImpl_rcr_u64
549};
550
551/** Function table for the SHL instruction. */
552static const IEMOPSHIFTSIZES g_iemAImpl_shl =
553{
554 iemAImpl_shl_u8,
555 iemAImpl_shl_u16,
556 iemAImpl_shl_u32,
557 iemAImpl_shl_u64
558};
559
560/** Function table for the SHR instruction. */
561static const IEMOPSHIFTSIZES g_iemAImpl_shr =
562{
563 iemAImpl_shr_u8,
564 iemAImpl_shr_u16,
565 iemAImpl_shr_u32,
566 iemAImpl_shr_u64
567};
568
569/** Function table for the SAR instruction. */
570static const IEMOPSHIFTSIZES g_iemAImpl_sar =
571{
572 iemAImpl_sar_u8,
573 iemAImpl_sar_u16,
574 iemAImpl_sar_u32,
575 iemAImpl_sar_u64
576};
577
578
579/** Function table for the MUL instruction. */
580static const IEMOPMULDIVSIZES g_iemAImpl_mul =
581{
582 iemAImpl_mul_u8,
583 iemAImpl_mul_u16,
584 iemAImpl_mul_u32,
585 iemAImpl_mul_u64
586};
587
588/** Function table for the IMUL instruction working implicitly on rAX. */
589static const IEMOPMULDIVSIZES g_iemAImpl_imul =
590{
591 iemAImpl_imul_u8,
592 iemAImpl_imul_u16,
593 iemAImpl_imul_u32,
594 iemAImpl_imul_u64
595};
596
597/** Function table for the DIV instruction. */
598static const IEMOPMULDIVSIZES g_iemAImpl_div =
599{
600 iemAImpl_div_u8,
601 iemAImpl_div_u16,
602 iemAImpl_div_u32,
603 iemAImpl_div_u64
604};
605
606/** Function table for the IDIV instruction. */
607static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
608{
609 iemAImpl_idiv_u8,
610 iemAImpl_idiv_u16,
611 iemAImpl_idiv_u32,
612 iemAImpl_idiv_u64
613};
614
615/** Function table for the SHLD instruction */
616static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
617{
618 iemAImpl_shld_u16,
619 iemAImpl_shld_u32,
620 iemAImpl_shld_u64,
621};
622
623/** Function table for the SHRD instruction */
624static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
625{
626 iemAImpl_shrd_u16,
627 iemAImpl_shrd_u32,
628 iemAImpl_shrd_u64,
629};
630
631
632/*******************************************************************************
633* Internal Functions *
634*******************************************************************************/
635static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
636/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
637static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
638static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
639static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
640static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
641static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
642static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
643static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
644static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
645static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
646static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
647static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
648static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
649static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
650static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
651static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
652static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
653static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
654static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
655static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
656static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
657static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
658static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
659
660#ifdef IEM_VERIFICATION_MODE
661static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
662#endif
663static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
664static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
665
666
667/**
668 * Sets the pass up status.
669 *
670 * @returns VINF_SUCCESS.
671 * @param pIemCpu The per CPU IEM state of the calling thread.
672 * @param rcPassUp The pass up status. Must be informational.
673 * VINF_SUCCESS is not allowed.
674 */
675static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
676{
677 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
678
679 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
680 if (rcOldPassUp == VINF_SUCCESS)
681 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
682 /* If both are EM scheduling code, use EM priority rules. */
683 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
684 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
685 {
686 if (rcPassUp < rcOldPassUp)
687 {
688 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
689 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
690 }
691 else
692 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
693 }
694 /* Override EM scheduling with specific status code. */
695 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
696 {
697 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
698 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
699 }
700 /* Don't override specific status code, first come first served. */
701 else
702 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
703 return VINF_SUCCESS;
704}
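
/*
 * Typical call-site pattern (sketch, assuming rcStrict2 holds an informational
 * status other than VINF_SUCCESS from some VMM API); when several such
 * statuses arrive, the rules above keep the most urgent EM scheduling code or
 * the first specific status:
 */
#if 0 /* example only, not built */
rcStrict2 = iemSetPassUpStatus(pIemCpu, rcStrict2); /* remembers the status, returns VINF_SUCCESS */
#endif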
705
706
707/**
708 * Initializes the decoder state.
709 *
710 * @param pIemCpu The per CPU IEM state.
711 */
712DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu)
713{
714 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
715 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
716
717#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
718 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
719 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
720 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
721 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
722 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
723 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
724 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
725 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
726#endif
727
728#ifdef VBOX_WITH_RAW_MODE_NOT_R0
729 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
730#endif
731 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
732#ifdef IEM_VERIFICATION_MODE
733 if (pIemCpu->uInjectCpl != UINT8_MAX)
734 pIemCpu->uCpl = pIemCpu->uInjectCpl;
735#endif
736 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
737 ? IEMMODE_64BIT
738 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
739 ? IEMMODE_32BIT
740 : IEMMODE_16BIT;
741 pIemCpu->enmCpuMode = enmMode;
742 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
743 pIemCpu->enmEffAddrMode = enmMode;
744 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
745 pIemCpu->enmEffOpSize = enmMode;
746 pIemCpu->fPrefixes = 0;
747 pIemCpu->uRexReg = 0;
748 pIemCpu->uRexB = 0;
749 pIemCpu->uRexIndex = 0;
750 pIemCpu->iEffSeg = X86_SREG_DS;
751 pIemCpu->offOpcode = 0;
752 pIemCpu->cbOpcode = 0;
753 pIemCpu->cActiveMappings = 0;
754 pIemCpu->iNextMapping = 0;
755 pIemCpu->rcPassUp = VINF_SUCCESS;
756}
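
/*
 * In short, the mode selection above works out to (sketch):
 *    EFER.LMA=1 && CS.L=1  ->  IEMMODE_64BIT   (CPUMIsGuestIn64BitCodeEx)
 *    else CS.D/B=1         ->  IEMMODE_32BIT   (cs.Attr.n.u1DefBig)
 *    else                  ->  IEMMODE_16BIT
 * and the default address and operand sizes start out equal to that mode.
 */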
757
758
759/**
760 * Prefetches opcodes when execution is first started.
761 *
762 * @returns Strict VBox status code.
763 * @param pIemCpu The IEM state.
764 */
765static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
766{
767#ifdef IEM_VERIFICATION_MODE
768 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
769#endif
770 iemInitDecoder(pIemCpu);
771
772 /*
773 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
774 *
775 * First translate CS:rIP to a physical address.
776 */
777 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
778 uint32_t cbToTryRead;
779 RTGCPTR GCPtrPC;
780 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
781 {
782 cbToTryRead = PAGE_SIZE;
783 GCPtrPC = pCtx->rip;
784 if (!IEM_IS_CANONICAL(GCPtrPC))
785 return iemRaiseGeneralProtectionFault0(pIemCpu);
786 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
787 }
788 else
789 {
790 uint32_t GCPtrPC32 = pCtx->eip;
791 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
792 if (GCPtrPC32 > pCtx->cs.u32Limit)
793 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
794 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
795 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
796 }
797
798 RTGCPHYS GCPhys;
799 uint64_t fFlags;
800 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
801 if (RT_FAILURE(rc))
802 {
803#if defined(IN_RC) && defined(VBOX_WITH_RAW_MODE)
804 /* Allow interpretation of patch manager code blocks since they can for
805 instance throw #PFs for perfectly good reasons. */
806 if ( (pCtx->cs.Sel & X86_SEL_RPL) == 1
807 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), GCPtrPC))
808 {
809 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
810 if (cbToTryRead > cbLeftOnPage)
811 cbToTryRead = cbLeftOnPage;
812 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
813 cbToTryRead = sizeof(pIemCpu->abOpcode);
814 memcpy(pIemCpu->abOpcode, (void const *)(uintptr_t)GCPtrPC, cbToTryRead);
815 pIemCpu->cbOpcode = cbToTryRead;
816 return VINF_SUCCESS;
817 }
818#endif
819 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
820 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
821 }
822 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
823 {
824 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
825 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
826 }
827 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
828 {
829 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
830 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
831 }
832 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
833 /** @todo Check reserved bits and such stuff. PGM is better at doing
834 * that, so do it when implementing the guest virtual address
835 * TLB... */
836
837#ifdef IEM_VERIFICATION_MODE
838 /*
839 * Optimistic optimization: Use unconsumed opcode bytes from the previous
840 * instruction.
841 */
842 /** @todo optimize this differently by not using PGMPhysRead. */
843 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
844 pIemCpu->GCPhysOpcodes = GCPhys;
845 if ( offPrevOpcodes < cbOldOpcodes
846 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
847 {
848 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
849 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
850 pIemCpu->cbOpcode = cbNew;
851 return VINF_SUCCESS;
852 }
853#endif
854
855 /*
856 * Read the bytes at this address.
857 */
858 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
859 if (cbToTryRead > cbLeftOnPage)
860 cbToTryRead = cbLeftOnPage;
861 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
862 cbToTryRead = sizeof(pIemCpu->abOpcode);
863 /** @todo PATM: Read original, unpatched bytes? EMAll.cpp doesn't seem to be
864 * doing that. */
865 if (!pIemCpu->fByPassHandlers)
866 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
867 else
868 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
869 if (rc != VINF_SUCCESS)
870 {
871 /** @todo status code handling */
872 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrPC, rc));
873 return rc;
874 }
875 pIemCpu->cbOpcode = cbToTryRead;
876
877 return VINF_SUCCESS;
878}
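
/*
 * To recap the prefetch above (sketch): CS:rIP is turned into a linear address
 * (canonical / segment limit checks), translated to a physical address by
 * PGMGstGetPage (user and NX page checks), and then at most one page worth of
 * bytes - capped by sizeof(pIemCpu->abOpcode) - is read into the opcode
 * buffer.  iemOpcodeFetchMoreBytes below repeats the same dance whenever the
 * decoder runs past the buffered bytes.
 */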
879
880
881/**
882 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
883 * exception if that fails.
884 *
885 * @returns Strict VBox status code.
886 * @param pIemCpu The IEM state.
887 * @param cbMin The minimum number of opcode bytes needed.
888 */
889static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
890{
891 /*
892 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
893 *
894 * First translate CS:rIP to a physical address.
895 */
896 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
897 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
898 uint32_t cbToTryRead;
899 RTGCPTR GCPtrNext;
900 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
901 {
902 cbToTryRead = PAGE_SIZE;
903 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
904 if (!IEM_IS_CANONICAL(GCPtrNext))
905 return iemRaiseGeneralProtectionFault0(pIemCpu);
906 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
907 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
908 }
909 else
910 {
911 uint32_t GCPtrNext32 = pCtx->eip;
912 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
913 GCPtrNext32 += pIemCpu->cbOpcode;
914 if (GCPtrNext32 > pCtx->cs.u32Limit)
915 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
916 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
917 if (cbToTryRead < cbMin - cbLeft)
918 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
919 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
920 }
921
922 RTGCPHYS GCPhys;
923 uint64_t fFlags;
924 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
925 if (RT_FAILURE(rc))
926 {
927 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
928 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
929 }
930 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
931 {
932 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
933 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
934 }
935 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
936 {
937 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
938 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
939 }
940 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
941 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
942 /** @todo Check reserved bits and such stuff. PGM is better at doing
943 * that, so do it when implementing the guest virtual address
944 * TLB... */
945
946 /*
947 * Read the bytes at this address.
948 */
949 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
950 if (cbToTryRead > cbLeftOnPage)
951 cbToTryRead = cbLeftOnPage;
952 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
953 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
954 Assert(cbToTryRead >= cbMin - cbLeft);
955 if (!pIemCpu->fByPassHandlers)
956 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
957 else
958 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
959 if (rc != VINF_SUCCESS)
960 {
961 /** @todo status code handling */
962 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
963 return rc;
964 }
965 pIemCpu->cbOpcode += cbToTryRead;
966 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
967
968 return VINF_SUCCESS;
969}
970
971
972/**
973 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
974 *
975 * @returns Strict VBox status code.
976 * @param pIemCpu The IEM state.
977 * @param pb Where to return the opcode byte.
978 */
979DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
980{
981 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
982 if (rcStrict == VINF_SUCCESS)
983 {
984 uint8_t offOpcode = pIemCpu->offOpcode;
985 *pb = pIemCpu->abOpcode[offOpcode];
986 pIemCpu->offOpcode = offOpcode + 1;
987 }
988 else
989 *pb = 0;
990 return rcStrict;
991}
992
993
994/**
995 * Fetches the next opcode byte.
996 *
997 * @returns Strict VBox status code.
998 * @param pIemCpu The IEM state.
999 * @param pu8 Where to return the opcode byte.
1000 */
1001DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1002{
1003 uint8_t const offOpcode = pIemCpu->offOpcode;
1004 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1005 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1006
1007 *pu8 = pIemCpu->abOpcode[offOpcode];
1008 pIemCpu->offOpcode = offOpcode + 1;
1009 return VINF_SUCCESS;
1010}
1011
1012
1013/**
1014 * Fetches the next opcode byte, returns automatically on failure.
1015 *
1016 * @param a_pu8 Where to return the opcode byte.
1017 * @remark Implicitly references pIemCpu.
1018 */
1019#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1020 do \
1021 { \
1022 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1023 if (rcStrict2 != VINF_SUCCESS) \
1024 return rcStrict2; \
1025 } while (0)
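
/*
 * Illustrative sketch: the macro above lets decoder bodies read opcode bytes
 * without explicit error plumbing - a failed fetch simply returns the strict
 * status from the enclosing FNIEMOP_DEF function.  The opcode body below is
 * made up for illustration.
 */
#if 0 /* example only, not built */
FNIEMOP_DEF(iemOp_example_with_modrm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns on failure */
    /* ... decode mod/reg/rm and dispatch ... */
    return VINF_SUCCESS;
}
#endif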
1026
1027
1028/**
1029 * Fetches the next signed byte from the opcode stream.
1030 *
1031 * @returns Strict VBox status code.
1032 * @param pIemCpu The IEM state.
1033 * @param pi8 Where to return the signed byte.
1034 */
1035DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1036{
1037 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1038}
1039
1040
1041/**
1042 * Fetches the next signed byte from the opcode stream, returning automatically
1043 * on failure.
1044 *
1045 * @param pi8 Where to return the signed byte.
1046 * @remark Implicitly references pIemCpu.
1047 */
1048#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1049 do \
1050 { \
1051 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1052 if (rcStrict2 != VINF_SUCCESS) \
1053 return rcStrict2; \
1054 } while (0)
1055
1056
1057/**
1058 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1059 *
1060 * @returns Strict VBox status code.
1061 * @param pIemCpu The IEM state.
1062 * @param pu16 Where to return the word (sign-extended opcode byte).
1063 */
1064DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1065{
1066 uint8_t u8;
1067 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1068 if (rcStrict == VINF_SUCCESS)
1069 *pu16 = (int8_t)u8;
1070 return rcStrict;
1071}
1072
1073
1074/**
1075 * Fetches the next signed byte from the opcode stream, extending it to
1076 * unsigned 16-bit.
1077 *
1078 * @returns Strict VBox status code.
1079 * @param pIemCpu The IEM state.
1080 * @param pu16 Where to return the unsigned word.
1081 */
1082DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1083{
1084 uint8_t const offOpcode = pIemCpu->offOpcode;
1085 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1086 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1087
1088 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1089 pIemCpu->offOpcode = offOpcode + 1;
1090 return VINF_SUCCESS;
1091}
1092
1093
1094/**
1095 * Fetches the next signed byte from the opcode stream, sign-extends it to
1096 * a word, and returns automatically on failure.
1097 *
1098 * @param a_pu16 Where to return the word.
1099 * @remark Implicitly references pIemCpu.
1100 */
1101#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1102 do \
1103 { \
1104 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1105 if (rcStrict2 != VINF_SUCCESS) \
1106 return rcStrict2; \
1107 } while (0)
1108
1109
1110/**
1111 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1112 *
1113 * @returns Strict VBox status code.
1114 * @param pIemCpu The IEM state.
1115 * @param pu32 Where to return the opcode dword.
1116 */
1117DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1118{
1119 uint8_t u8;
1120 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1121 if (rcStrict == VINF_SUCCESS)
1122 *pu32 = (int8_t)u8;
1123 return rcStrict;
1124}
1125
1126
1127/**
1128 * Fetches the next signed byte from the opcode stream, extending it to
1129 * unsigned 32-bit.
1130 *
1131 * @returns Strict VBox status code.
1132 * @param pIemCpu The IEM state.
1133 * @param pu32 Where to return the unsigned dword.
1134 */
1135DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1136{
1137 uint8_t const offOpcode = pIemCpu->offOpcode;
1138 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1139 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1140
1141 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1142 pIemCpu->offOpcode = offOpcode + 1;
1143 return VINF_SUCCESS;
1144}
1145
1146
1147/**
1148 * Fetches the next signed byte from the opcode stream, sign-extends it to
1149 * a double word, and returns automatically on failure.
1150 *
1151 * @param a_pu32 Where to return the double word.
1152 * @remark Implicitly references pIemCpu.
1153 */
1154#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1155 do \
1156 { \
1157 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1158 if (rcStrict2 != VINF_SUCCESS) \
1159 return rcStrict2; \
1160 } while (0)
1161
1162
1163/**
1164 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1165 *
1166 * @returns Strict VBox status code.
1167 * @param pIemCpu The IEM state.
1168 * @param pu64 Where to return the opcode qword.
1169 */
1170DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1171{
1172 uint8_t u8;
1173 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1174 if (rcStrict == VINF_SUCCESS)
1175 *pu64 = (int8_t)u8;
1176 return rcStrict;
1177}
1178
1179
1180/**
1181 * Fetches the next signed byte from the opcode stream, extending it to
1182 * unsigned 64-bit.
1183 *
1184 * @returns Strict VBox status code.
1185 * @param pIemCpu The IEM state.
1186 * @param pu64 Where to return the unsigned qword.
1187 */
1188DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1189{
1190 uint8_t const offOpcode = pIemCpu->offOpcode;
1191 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1192 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1193
1194 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1195 pIemCpu->offOpcode = offOpcode + 1;
1196 return VINF_SUCCESS;
1197}
1198
1199
1200/**
1201 * Fetches the next signed byte from the opcode stream, sign-extends it to
1202 * a quad word, and returns automatically on failure.
1203 *
1204 * @param a_pu64 Where to return the quad word.
1205 * @remark Implicitly references pIemCpu.
1206 */
1207#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1208 do \
1209 { \
1210 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1211 if (rcStrict2 != VINF_SUCCESS) \
1212 return rcStrict2; \
1213 } while (0)
1214
1215
1216/**
1217 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1218 *
1219 * @returns Strict VBox status code.
1220 * @param pIemCpu The IEM state.
1221 * @param pu16 Where to return the opcode word.
1222 */
1223DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1224{
1225 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1226 if (rcStrict == VINF_SUCCESS)
1227 {
1228 uint8_t offOpcode = pIemCpu->offOpcode;
1229 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1230 pIemCpu->offOpcode = offOpcode + 2;
1231 }
1232 else
1233 *pu16 = 0;
1234 return rcStrict;
1235}
1236
1237
1238/**
1239 * Fetches the next opcode word.
1240 *
1241 * @returns Strict VBox status code.
1242 * @param pIemCpu The IEM state.
1243 * @param pu16 Where to return the opcode word.
1244 */
1245DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1246{
1247 uint8_t const offOpcode = pIemCpu->offOpcode;
1248 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1249 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1250
1251 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1252 pIemCpu->offOpcode = offOpcode + 2;
1253 return VINF_SUCCESS;
1254}
1255
1256
1257/**
1258 * Fetches the next opcode word, returns automatically on failure.
1259 *
1260 * @param a_pu16 Where to return the opcode word.
1261 * @remark Implicitly references pIemCpu.
1262 */
1263#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1264 do \
1265 { \
1266 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1267 if (rcStrict2 != VINF_SUCCESS) \
1268 return rcStrict2; \
1269 } while (0)
1270
1271
1272/**
1273 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1274 *
1275 * @returns Strict VBox status code.
1276 * @param pIemCpu The IEM state.
1277 * @param pu32 Where to return the opcode double word.
1278 */
1279DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1280{
1281 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1282 if (rcStrict == VINF_SUCCESS)
1283 {
1284 uint8_t offOpcode = pIemCpu->offOpcode;
1285 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1286 pIemCpu->offOpcode = offOpcode + 2;
1287 }
1288 else
1289 *pu32 = 0;
1290 return rcStrict;
1291}
1292
1293
1294/**
1295 * Fetches the next opcode word, zero extending it to a double word.
1296 *
1297 * @returns Strict VBox status code.
1298 * @param pIemCpu The IEM state.
1299 * @param pu32 Where to return the opcode double word.
1300 */
1301DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1302{
1303 uint8_t const offOpcode = pIemCpu->offOpcode;
1304 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1305 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1306
1307 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1308 pIemCpu->offOpcode = offOpcode + 2;
1309 return VINF_SUCCESS;
1310}
1311
1312
1313/**
1314 * Fetches the next opcode word and zero extends it to a double word, returns
1315 * automatically on failure.
1316 *
1317 * @param a_pu32 Where to return the opcode double word.
1318 * @remark Implicitly references pIemCpu.
1319 */
1320#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1321 do \
1322 { \
1323 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1324 if (rcStrict2 != VINF_SUCCESS) \
1325 return rcStrict2; \
1326 } while (0)
1327
1328
1329/**
1330 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1331 *
1332 * @returns Strict VBox status code.
1333 * @param pIemCpu The IEM state.
1334 * @param pu64 Where to return the opcode quad word.
1335 */
1336DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1337{
1338 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1339 if (rcStrict == VINF_SUCCESS)
1340 {
1341 uint8_t offOpcode = pIemCpu->offOpcode;
1342 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1343 pIemCpu->offOpcode = offOpcode + 2;
1344 }
1345 else
1346 *pu64 = 0;
1347 return rcStrict;
1348}
1349
1350
1351/**
1352 * Fetches the next opcode word, zero extending it to a quad word.
1353 *
1354 * @returns Strict VBox status code.
1355 * @param pIemCpu The IEM state.
1356 * @param pu64 Where to return the opcode quad word.
1357 */
1358DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1359{
1360 uint8_t const offOpcode = pIemCpu->offOpcode;
1361 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1362 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1363
1364 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1365 pIemCpu->offOpcode = offOpcode + 2;
1366 return VINF_SUCCESS;
1367}
1368
1369
1370/**
1371 * Fetches the next opcode word and zero extends it to a quad word, returns
1372 * automatically on failure.
1373 *
1374 * @param a_pu64 Where to return the opcode quad word.
1375 * @remark Implicitly references pIemCpu.
1376 */
1377#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1378 do \
1379 { \
1380 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1381 if (rcStrict2 != VINF_SUCCESS) \
1382 return rcStrict2; \
1383 } while (0)
1384
1385
1386/**
1387 * Fetches the next signed word from the opcode stream.
1388 *
1389 * @returns Strict VBox status code.
1390 * @param pIemCpu The IEM state.
1391 * @param pi16 Where to return the signed word.
1392 */
1393DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1394{
1395 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1396}
1397
1398
1399/**
1400 * Fetches the next signed word from the opcode stream, returning automatically
1401 * on failure.
1402 *
1403 * @param pi16 Where to return the signed word.
1404 * @remark Implicitly references pIemCpu.
1405 */
1406#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1407 do \
1408 { \
1409 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1410 if (rcStrict2 != VINF_SUCCESS) \
1411 return rcStrict2; \
1412 } while (0)
1413
1414
1415/**
1416 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1417 *
1418 * @returns Strict VBox status code.
1419 * @param pIemCpu The IEM state.
1420 * @param pu32 Where to return the opcode dword.
1421 */
1422DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1423{
1424 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1425 if (rcStrict == VINF_SUCCESS)
1426 {
1427 uint8_t offOpcode = pIemCpu->offOpcode;
1428 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1429 pIemCpu->abOpcode[offOpcode + 1],
1430 pIemCpu->abOpcode[offOpcode + 2],
1431 pIemCpu->abOpcode[offOpcode + 3]);
1432 pIemCpu->offOpcode = offOpcode + 4;
1433 }
1434 else
1435 *pu32 = 0;
1436 return rcStrict;
1437}
1438
1439
1440/**
1441 * Fetches the next opcode dword.
1442 *
1443 * @returns Strict VBox status code.
1444 * @param pIemCpu The IEM state.
1445 * @param pu32 Where to return the opcode double word.
1446 */
1447DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1448{
1449 uint8_t const offOpcode = pIemCpu->offOpcode;
1450 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1451 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1452
1453 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1454 pIemCpu->abOpcode[offOpcode + 1],
1455 pIemCpu->abOpcode[offOpcode + 2],
1456 pIemCpu->abOpcode[offOpcode + 3]);
1457 pIemCpu->offOpcode = offOpcode + 4;
1458 return VINF_SUCCESS;
1459}
1460
1461
1462/**
1463 * Fetches the next opcode dword, returns automatically on failure.
1464 *
1465 * @param a_pu32 Where to return the opcode dword.
1466 * @remark Implicitly references pIemCpu.
1467 */
1468#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1469 do \
1470 { \
1471 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1472 if (rcStrict2 != VINF_SUCCESS) \
1473 return rcStrict2; \
1474 } while (0)
1475
1476
1477/**
1478 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1479 *
1480 * @returns Strict VBox status code.
1481 * @param pIemCpu The IEM state.
1482 * @param pu64 Where to return the opcode dword, zero extended to a quad word.
1483 */
1484DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1485{
1486 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1487 if (rcStrict == VINF_SUCCESS)
1488 {
1489 uint8_t offOpcode = pIemCpu->offOpcode;
1490 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1491 pIemCpu->abOpcode[offOpcode + 1],
1492 pIemCpu->abOpcode[offOpcode + 2],
1493 pIemCpu->abOpcode[offOpcode + 3]);
1494 pIemCpu->offOpcode = offOpcode + 4;
1495 }
1496 else
1497 *pu64 = 0;
1498 return rcStrict;
1499}
1500
1501
1502/**
1503 * Fetches the next opcode dword, zero extending it to a quad word.
1504 *
1505 * @returns Strict VBox status code.
1506 * @param pIemCpu The IEM state.
1507 * @param pu64 Where to return the opcode quad word.
1508 */
1509DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1510{
1511 uint8_t const offOpcode = pIemCpu->offOpcode;
1512 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1513 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1514
1515 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1516 pIemCpu->abOpcode[offOpcode + 1],
1517 pIemCpu->abOpcode[offOpcode + 2],
1518 pIemCpu->abOpcode[offOpcode + 3]);
1519 pIemCpu->offOpcode = offOpcode + 4;
1520 return VINF_SUCCESS;
1521}
1522
1523
1524/**
1525 * Fetches the next opcode dword and zero extends it to a quad word, returns
1526 * automatically on failure.
1527 *
1528 * @param a_pu64 Where to return the opcode quad word.
1529 * @remark Implicitly references pIemCpu.
1530 */
1531#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1532 do \
1533 { \
1534 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1535 if (rcStrict2 != VINF_SUCCESS) \
1536 return rcStrict2; \
1537 } while (0)
1538
1539
1540/**
1541 * Fetches the next signed double word from the opcode stream.
1542 *
1543 * @returns Strict VBox status code.
1544 * @param pIemCpu The IEM state.
1545 * @param pi32 Where to return the signed double word.
1546 */
1547DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1548{
1549 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1550}
1551
1552/**
1553 * Fetches the next signed double word from the opcode stream, returning
1554 * automatically on failure.
1555 *
1556 * @param pi32 Where to return the signed double word.
1557 * @remark Implicitly references pIemCpu.
1558 */
1559#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1560 do \
1561 { \
1562 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1563 if (rcStrict2 != VINF_SUCCESS) \
1564 return rcStrict2; \
1565 } while (0)
1566
1567
1568/**
1569 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1570 *
1571 * @returns Strict VBox status code.
1572 * @param pIemCpu The IEM state.
1573 * @param pu64 Where to return the opcode qword.
1574 */
1575DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1576{
1577 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1578 if (rcStrict == VINF_SUCCESS)
1579 {
1580 uint8_t offOpcode = pIemCpu->offOpcode;
1581 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1582 pIemCpu->abOpcode[offOpcode + 1],
1583 pIemCpu->abOpcode[offOpcode + 2],
1584 pIemCpu->abOpcode[offOpcode + 3]);
1585 pIemCpu->offOpcode = offOpcode + 4;
1586 }
1587 else
1588 *pu64 = 0;
1589 return rcStrict;
1590}
1591
1592
1593/**
1594 * Fetches the next opcode dword, sign extending it into a quad word.
1595 *
1596 * @returns Strict VBox status code.
1597 * @param pIemCpu The IEM state.
1598 * @param pu64 Where to return the opcode quad word.
1599 */
1600DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1601{
1602 uint8_t const offOpcode = pIemCpu->offOpcode;
1603 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1604 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1605
1606 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1607 pIemCpu->abOpcode[offOpcode + 1],
1608 pIemCpu->abOpcode[offOpcode + 2],
1609 pIemCpu->abOpcode[offOpcode + 3]);
1610 *pu64 = i32;
1611 pIemCpu->offOpcode = offOpcode + 4;
1612 return VINF_SUCCESS;
1613}
1614
1615
1616/**
1617 * Fetches the next opcode double word and sign extends it to a quad word,
1618 * returns automatically on failure.
1619 *
1620 * @param a_pu64 Where to return the opcode quad word.
1621 * @remark Implicitly references pIemCpu.
1622 */
1623#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1624 do \
1625 { \
1626 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1627 if (rcStrict2 != VINF_SUCCESS) \
1628 return rcStrict2; \
1629 } while (0)
1630
1631
1632/**
1633 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1634 *
1635 * @returns Strict VBox status code.
1636 * @param pIemCpu The IEM state.
1637 * @param pu64 Where to return the opcode qword.
1638 */
1639DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1640{
1641 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1642 if (rcStrict == VINF_SUCCESS)
1643 {
1644 uint8_t offOpcode = pIemCpu->offOpcode;
1645 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1646 pIemCpu->abOpcode[offOpcode + 1],
1647 pIemCpu->abOpcode[offOpcode + 2],
1648 pIemCpu->abOpcode[offOpcode + 3],
1649 pIemCpu->abOpcode[offOpcode + 4],
1650 pIemCpu->abOpcode[offOpcode + 5],
1651 pIemCpu->abOpcode[offOpcode + 6],
1652 pIemCpu->abOpcode[offOpcode + 7]);
1653 pIemCpu->offOpcode = offOpcode + 8;
1654 }
1655 else
1656 *pu64 = 0;
1657 return rcStrict;
1658}
1659
1660
1661/**
1662 * Fetches the next opcode qword.
1663 *
1664 * @returns Strict VBox status code.
1665 * @param pIemCpu The IEM state.
1666 * @param pu64 Where to return the opcode qword.
1667 */
1668DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1669{
1670 uint8_t const offOpcode = pIemCpu->offOpcode;
1671 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1672 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1673
1674 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1675 pIemCpu->abOpcode[offOpcode + 1],
1676 pIemCpu->abOpcode[offOpcode + 2],
1677 pIemCpu->abOpcode[offOpcode + 3],
1678 pIemCpu->abOpcode[offOpcode + 4],
1679 pIemCpu->abOpcode[offOpcode + 5],
1680 pIemCpu->abOpcode[offOpcode + 6],
1681 pIemCpu->abOpcode[offOpcode + 7]);
1682 pIemCpu->offOpcode = offOpcode + 8;
1683 return VINF_SUCCESS;
1684}
1685
1686
1687/**
1688 * Fetches the next opcode quad word, returns automatically on failure.
1689 *
1690 * @param a_pu64 Where to return the opcode quad word.
1691 * @remark Implicitly references pIemCpu.
1692 */
1693#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1694 do \
1695 { \
1696 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1697 if (rcStrict2 != VINF_SUCCESS) \
1698 return rcStrict2; \
1699 } while (0)
1700
1701
1702/** @name Misc Worker Functions.
1703 * @{
1704 */
1705
1706
1707/**
1708 * Validates a new SS segment.
1709 *
1710 * @returns VBox strict status code.
1711 * @param pIemCpu The IEM per CPU instance data.
1712 * @param pCtx The CPU context.
1713 * @param NewSS The new SS selector.
1714 * @param uCpl The CPL to load the stack for.
1715 * @param pDesc Where to return the descriptor.
1716 */
1717static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1718{
1719 NOREF(pCtx);
1720
1721 /* Null selectors are not allowed (we're not called for dispatching
1722 interrupts with SS=0 in long mode). */
1723 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1724 {
1725 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1726 return iemRaiseGeneralProtectionFault0(pIemCpu);
1727 }
1728
1729 /*
1730 * Read the descriptor.
1731 */
1732 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1733 if (rcStrict != VINF_SUCCESS)
1734 return rcStrict;
1735
1736 /*
1737 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1738 */
1739 if (!pDesc->Legacy.Gen.u1DescType)
1740 {
1741 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1742 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1743 }
1744
1745 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1746 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1747 {
1748 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1749 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1750 }
1751 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1752 if ((NewSS & X86_SEL_RPL) != uCpl)
1753 {
1754 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #GP\n", NewSS, uCpl));
1755 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1756 }
1757 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1758 {
1759 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1760 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1761 }
1762
1763 /* Is it there? */
1764 /** @todo testcase: Is this checked before the canonical / limit check below? */
1765 if (!pDesc->Legacy.Gen.u1Present)
1766 {
1767 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1768 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1769 }
1770
1771 return VINF_SUCCESS;
1772}
1773
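/*
 * Summary of the SS checks performed above (mirroring the LSS / POP SS /
 * MOV SS descriptions):
 *      - null selector                             -> #GP(0)
 *      - system descriptor                         -> #GP(sel)
 *      - code segment or non-writable data segment -> #GP(sel)
 *      - RPL != CPL or DPL != CPL                  -> #GP(sel)
 *      - segment not present                       -> #NP(sel)
 */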
1774
1775/** @} */
1776
1777/** @name Raising Exceptions.
1778 *
1779 * @{
1780 */
1781
1782/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1783 * @{ */
1784/** CPU exception. */
1785#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1786/** External interrupt (from PIC, APIC, whatever). */
1787#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1788/** Software interrupt (int, into or bound). */
1789#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1790/** Takes an error code. */
1791#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1792/** Takes a CR2. */
1793#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1794/** Generated by the breakpoint instruction. */
1795#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1796/** @} */
1797
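/*
 * Typical combinations of the flags above, as used by the raisers further
 * down: a #GP(n) is raised with IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
 * while a #PF additionally sets IEM_XCPT_FLAGS_CR2 so that iemRaiseXcptOrInt
 * knows to load CR2 with the faulting address.  Minimal sketch of raising a
 * fault that takes an error code:
 *
 *      return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP,
 *                               IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
 *                               uErr, 0);
 */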
1798/**
1799 * Loads the specified stack far pointer from the TSS.
1800 *
1801 * @returns VBox strict status code.
1802 * @param pIemCpu The IEM per CPU instance data.
1803 * @param pCtx The CPU context.
1804 * @param uCpl The CPL to load the stack for.
1805 * @param pSelSS Where to return the new stack segment.
1806 * @param puEsp Where to return the new stack pointer.
1807 */
1808static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1809 PRTSEL pSelSS, uint32_t *puEsp)
1810{
1811 VBOXSTRICTRC rcStrict;
1812 Assert(uCpl < 4);
1813 *puEsp = 0; /* make gcc happy */
1814 *pSelSS = 0; /* make gcc happy */
1815
1816 switch (pCtx->tr.Attr.n.u4Type)
1817 {
1818 /*
1819 * 16-bit TSS (X86TSS16).
1820 */
1821 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1822 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1823 {
1824 uint32_t off = uCpl * 4 + 2;
1825 if (off + 4 > pCtx->tr.u32Limit)
1826 {
1827 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
1828 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1829 }
1830
1831 uint32_t u32Tmp = 0; /* gcc maybe... */
1832 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1833 if (rcStrict == VINF_SUCCESS)
1834 {
1835 *puEsp = RT_LOWORD(u32Tmp);
1836 *pSelSS = RT_HIWORD(u32Tmp);
1837 return VINF_SUCCESS;
1838 }
1839 break;
1840 }
1841
1842 /*
1843 * 32-bit TSS (X86TSS32).
1844 */
1845 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1846 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1847 {
1848 uint32_t off = uCpl * 8 + 4;
1849 if (off + 7 > pCtx->tr.u32Limit)
1850 {
1851 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
1852 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1853 }
1854
1855 uint64_t u64Tmp;
1856 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1857 if (rcStrict == VINF_SUCCESS)
1858 {
1859 *puEsp = u64Tmp & UINT32_MAX;
1860 *pSelSS = (RTSEL)(u64Tmp >> 32);
1861 return VINF_SUCCESS;
1862 }
1863 break;
1864 }
1865
1866 default:
1867 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1868 }
1869 return rcStrict;
1870}
1871
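/*
 * Layout notes for the TSS reads above: in a 16-bit TSS the ring stacks are
 * stored as SP followed by SS word pairs starting at offset 2, so ring uCpl
 * is read from uCpl * 4 + 2 (SP0 at 2, SS0 at 4, SP1 at 6, ...).  In a
 * 32-bit TSS the ESP/SS pairs start at offset 4 and are 8 bytes apart, so
 * ring uCpl is read from uCpl * 8 + 4 (ESP0 at 4, SS0 at 8, ...); the 8-byte
 * read above returns ESP in the low dword and the SS selector in the low
 * word of the high dword.
 */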
1872
1873/**
1874 * Adjust the CPU state according to the exception being raised.
1875 *
1876 * @param pCtx The CPU context.
1877 * @param u8Vector The exception that has been raised.
1878 */
1879DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1880{
1881 switch (u8Vector)
1882 {
1883 case X86_XCPT_DB:
1884 pCtx->dr[7] &= ~X86_DR7_GD;
1885 break;
1886 /** @todo Read the AMD and Intel exception reference... */
1887 }
1888}
1889
1890
1891/**
1892 * Implements exceptions and interrupts for real mode.
1893 *
1894 * @returns VBox strict status code.
1895 * @param pIemCpu The IEM per CPU instance data.
1896 * @param pCtx The CPU context.
1897 * @param cbInstr The number of bytes to offset rIP by in the return
1898 * address.
1899 * @param u8Vector The interrupt / exception vector number.
1900 * @param fFlags The flags.
1901 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1902 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1903 */
1904static VBOXSTRICTRC
1905iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1906 PCPUMCTX pCtx,
1907 uint8_t cbInstr,
1908 uint8_t u8Vector,
1909 uint32_t fFlags,
1910 uint16_t uErr,
1911 uint64_t uCr2)
1912{
1913 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1914 NOREF(uErr); NOREF(uCr2);
1915
1916 /*
1917 * Read the IDT entry.
1918 */
1919 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1920 {
1921 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1922 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1923 }
1924 RTFAR16 Idte;
1925 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1926 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1927 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1928 return rcStrict;
1929
1930 /*
1931 * Push the stack frame.
1932 */
1933 uint16_t *pu16Frame;
1934 uint64_t uNewRsp;
1935 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1936 if (rcStrict != VINF_SUCCESS)
1937 return rcStrict;
1938
1939 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
1940 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
1941 pu16Frame[0] = pCtx->ip + cbInstr;
1942 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1943 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1944 return rcStrict;
1945
1946 /*
1947 * Load the vector address into cs:ip and make exception specific state
1948 * adjustments.
1949 */
1950 pCtx->cs.Sel = Idte.sel;
1951 pCtx->cs.ValidSel = Idte.sel;
1952 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1953 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
1954 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1955 pCtx->rip = Idte.off;
1956 pCtx->eflags.Bits.u1IF = 0;
1957
1958 /** @todo do we actually do this in real mode? */
1959 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1960 iemRaiseXcptAdjustState(pCtx, u8Vector);
1961
1962 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1963}
1964
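/*
 * Worked example of the real mode path above: for vector 8 the IVT entry is
 * the 4-byte far pointer at idtr.pIdt + 4 * 8 = idtr.pIdt + 32, offset word
 * first and segment word second (the RTFAR16 layout).  Six bytes are pushed
 * (FLAGS, CS, then IP + cbInstr as the return address), CS:IP is loaded from
 * the entry, and IF is cleared.
 */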
1965
1966/**
1967 * Implements exceptions and interrupts for protected mode.
1968 *
1969 * @returns VBox strict status code.
1970 * @param pIemCpu The IEM per CPU instance data.
1971 * @param pCtx The CPU context.
1972 * @param cbInstr The number of bytes to offset rIP by in the return
1973 * address.
1974 * @param u8Vector The interrupt / exception vector number.
1975 * @param fFlags The flags.
1976 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1977 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1978 */
1979static VBOXSTRICTRC
1980iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
1981 PCPUMCTX pCtx,
1982 uint8_t cbInstr,
1983 uint8_t u8Vector,
1984 uint32_t fFlags,
1985 uint16_t uErr,
1986 uint64_t uCr2)
1987{
1988 NOREF(cbInstr);
1989
1990 /*
1991 * Read the IDT entry.
1992 */
1993 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1994 {
1995 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1996 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1997 }
1998 X86DESC Idte;
1999 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
2000 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
2001 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2002 return rcStrict;
2003 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2004 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2005 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2006
2007 /*
2008 * Check the descriptor type, DPL and such.
2009 * ASSUMES this is done in the same order as described for call-gate calls.
2010 */
2011 if (Idte.Gate.u1DescType)
2012 {
2013 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2014 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2015 }
2016 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2017 switch (Idte.Gate.u4Type)
2018 {
2019 case X86_SEL_TYPE_SYS_UNDEFINED:
2020 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2021 case X86_SEL_TYPE_SYS_LDT:
2022 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2023 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2024 case X86_SEL_TYPE_SYS_UNDEFINED2:
2025 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2026 case X86_SEL_TYPE_SYS_UNDEFINED3:
2027 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2028 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2029 case X86_SEL_TYPE_SYS_UNDEFINED4:
2030 {
2031 /** @todo check what actually happens when the type is wrong...
2032 * esp. call gates. */
2033 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2034 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2035 }
2036
2037 case X86_SEL_TYPE_SYS_286_INT_GATE:
2038 case X86_SEL_TYPE_SYS_386_INT_GATE:
2039 fEflToClear |= X86_EFL_IF;
2040 break;
2041
2042 case X86_SEL_TYPE_SYS_TASK_GATE:
2043 /** @todo task gates. */
2044 AssertFailedReturn(VERR_NOT_SUPPORTED);
2045
2046 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2047 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2048 break;
2049
2050 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2051 }
2052
2053 /* Check DPL against CPL if applicable. */
2054 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2055 {
2056 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2057 {
2058 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2059 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2060 }
2061 }
2062
2063 /* Is it there? */
2064 if (!Idte.Gate.u1Present)
2065 {
2066 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2067 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2068 }
2069
2070 /* A null CS is bad. */
2071 RTSEL NewCS = Idte.Gate.u16Sel;
2072 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2073 {
2074 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2075 return iemRaiseGeneralProtectionFault0(pIemCpu);
2076 }
2077
2078 /* Fetch the descriptor for the new CS. */
2079 IEMSELDESC DescCS;
2080 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2081 if (rcStrict != VINF_SUCCESS)
2082 {
2083 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2084 return rcStrict;
2085 }
2086
2087 /* Must be a code segment. */
2088 if (!DescCS.Legacy.Gen.u1DescType)
2089 {
2090 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2091 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2092 }
2093 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2094 {
2095 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2096 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2097 }
2098
2099 /* Don't allow lowering the privilege level. */
2100 /** @todo Does the lowering of privileges apply to software interrupts
2101 * only? This has bearings on the more-privileged or
2102 * same-privilege stack behavior further down. A testcase would
2103 * be nice. */
2104 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2105 {
2106 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2107 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2108 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2109 }
2110 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
2111
2112 /* Check the new EIP against the new CS limit. */
2113 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
2114 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
2115 ? Idte.Gate.u16OffsetLow
2116 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
2117 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2118 if (uNewEip > cbLimitCS)
2119 {
2120 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - EIP=%#x > cbLimitCS=%#x -> #GP\n",
2121 u8Vector, NewCS, uNewEip, cbLimitCS));
2122 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2123 }
2124
2125 /* Make sure the selector is present. */
2126 if (!DescCS.Legacy.Gen.u1Present)
2127 {
2128 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2129 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2130 }
2131
2132 /*
2133 * If the privilege level changes, we need to get a new stack from the TSS.
2134 * This in turns means validating the new SS and ESP...
2135 */
2136 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2137 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2138 if (uNewCpl != pIemCpu->uCpl)
2139 {
2140 RTSEL NewSS;
2141 uint32_t uNewEsp;
2142 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
2143 if (rcStrict != VINF_SUCCESS)
2144 return rcStrict;
2145
2146 IEMSELDESC DescSS;
2147 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
2148 if (rcStrict != VINF_SUCCESS)
2149 return rcStrict;
2150
2151 /* Check that there is sufficient space for the stack frame. */
2152 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2153 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
2154 {
2155 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
2156 }
2157
2158 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
2159 if ( uNewEsp - 1 > cbLimitSS
2160 || uNewEsp < cbStackFrame)
2161 {
2162 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
2163 u8Vector, NewSS, uNewEsp, cbStackFrame));
2164 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
2165 }
2166
2167 /*
2168 * Start making changes.
2169 */
2170
2171 /* Create the stack frame. */
2172 RTPTRUNION uStackFrame;
2173 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2174 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2175 if (rcStrict != VINF_SUCCESS)
2176 return rcStrict;
2177 void * const pvStackFrame = uStackFrame.pv;
2178
2179 if (fFlags & IEM_XCPT_FLAGS_ERR)
2180 *uStackFrame.pu32++ = uErr;
2181 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2182 ? pCtx->eip + cbInstr : pCtx->eip;
2183 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2184 uStackFrame.pu32[2] = pCtx->eflags.u;
2185 uStackFrame.pu32[3] = pCtx->esp;
2186 uStackFrame.pu32[4] = pCtx->ss.Sel;
2187 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2188 if (rcStrict != VINF_SUCCESS)
2189 return rcStrict;
2190
2191 /* Mark the selectors 'accessed' (hope this is the correct time). */
2192 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2193 * after pushing the stack frame? (Write protect the gdt + stack to
2194 * find out.) */
2195 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2196 {
2197 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2198 if (rcStrict != VINF_SUCCESS)
2199 return rcStrict;
2200 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2201 }
2202
2203 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2204 {
2205 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
2206 if (rcStrict != VINF_SUCCESS)
2207 return rcStrict;
2208 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2209 }
2210
2211 /*
2212 * Start committing the register changes (joins with the DPL=CPL branch).
2213 */
2214 pCtx->ss.Sel = NewSS;
2215 pCtx->ss.ValidSel = NewSS;
2216 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2217 pCtx->ss.u32Limit = cbLimitSS;
2218 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2219 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2220 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
2221 pIemCpu->uCpl = uNewCpl;
2222 }
2223 /*
2224 * Same privilege, no stack change and smaller stack frame.
2225 */
2226 else
2227 {
2228 uint64_t uNewRsp;
2229 RTPTRUNION uStackFrame;
2230 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
2231 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2232 if (rcStrict != VINF_SUCCESS)
2233 return rcStrict;
2234 void * const pvStackFrame = uStackFrame.pv;
2235
2236 if (fFlags & IEM_XCPT_FLAGS_ERR)
2237 *uStackFrame.pu32++ = uErr;
2238 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2239 ? pCtx->eip + cbInstr : pCtx->eip;
2240 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2241 uStackFrame.pu32[2] = pCtx->eflags.u;
2242 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2243 if (rcStrict != VINF_SUCCESS)
2244 return rcStrict;
2245
2246 /* Mark the CS selector as 'accessed'. */
2247 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2248 {
2249 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2250 if (rcStrict != VINF_SUCCESS)
2251 return rcStrict;
2252 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2253 }
2254
2255 /*
2256 * Start committing the register changes (joins with the other branch).
2257 */
2258 pCtx->rsp = uNewRsp;
2259 }
2260
2261 /* ... register committing continues. */
2262 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2263 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2264 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2265 pCtx->cs.u32Limit = cbLimitCS;
2266 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2267 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2268
2269 pCtx->rip = uNewEip;
2270 pCtx->rflags.u &= ~fEflToClear;
2271
2272 if (fFlags & IEM_XCPT_FLAGS_CR2)
2273 pCtx->cr2 = uCr2;
2274
2275 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2276 iemRaiseXcptAdjustState(pCtx, u8Vector);
2277
2278 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2279}
2280
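/*
 * The 32-bit frame built above, from the new (lowest) ESP upwards:
 * [error code,] EIP, CS, EFLAGS and, on a privilege change, the outer ESP
 * and SS on top.  That gives the 24/20 byte frame sizes used for the stack
 * switch case and 16/12 bytes for the same-privilege case, with/without an
 * error code respectively; when an error code is pushed the new ESP points
 * at it, otherwise at EIP.
 */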
2281
2282/**
2283 * Implements exceptions and interrupts for V8086 mode.
2284 *
2285 * @returns VBox strict status code.
2286 * @param pIemCpu The IEM per CPU instance data.
2287 * @param pCtx The CPU context.
2288 * @param cbInstr The number of bytes to offset rIP by in the return
2289 * address.
2290 * @param u8Vector The interrupt / exception vector number.
2291 * @param fFlags The flags.
2292 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2293 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2294 */
2295static VBOXSTRICTRC
2296iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2297 PCPUMCTX pCtx,
2298 uint8_t cbInstr,
2299 uint8_t u8Vector,
2300 uint32_t fFlags,
2301 uint16_t uErr,
2302 uint64_t uCr2)
2303{
2304 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2305 /** @todo implement me. */
2306 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n"));
2307}
2308
2309
2310/**
2311 * Implements exceptions and interrupts for long mode.
2312 *
2313 * @returns VBox strict status code.
2314 * @param pIemCpu The IEM per CPU instance data.
2315 * @param pCtx The CPU context.
2316 * @param cbInstr The number of bytes to offset rIP by in the return
2317 * address.
2318 * @param u8Vector The interrupt / exception vector number.
2319 * @param fFlags The flags.
2320 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2321 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2322 */
2323static VBOXSTRICTRC
2324iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2325 PCPUMCTX pCtx,
2326 uint8_t cbInstr,
2327 uint8_t u8Vector,
2328 uint32_t fFlags,
2329 uint16_t uErr,
2330 uint64_t uCr2)
2331{
2332 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2333 /** @todo implement me. */
2334 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("long mode exception / interrupt dispatching\n"));
2335}
2336
2337
2338/**
2339 * Implements exceptions and interrupts.
2340 *
2341 * All exceptions and interrupts go through this function!
2342 *
2343 * @returns VBox strict status code.
2344 * @param pIemCpu The IEM per CPU instance data.
2345 * @param cbInstr The number of bytes to offset rIP by in the return
2346 * address.
2347 * @param u8Vector The interrupt / exception vector number.
2348 * @param fFlags The flags.
2349 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2350 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2351 */
2352DECL_NO_INLINE(static, VBOXSTRICTRC)
2353iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2354 uint8_t cbInstr,
2355 uint8_t u8Vector,
2356 uint32_t fFlags,
2357 uint16_t uErr,
2358 uint64_t uCr2)
2359{
2360 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2361
2362 /*
2363 * Do recursion accounting.
2364 */
2365 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2366 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2367 if (pIemCpu->cXcptRecursions == 0)
2368 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2369 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2370 else
2371 {
2372 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2373 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2374
2375 /** @todo double and triple faults. */
2376 if (pIemCpu->cXcptRecursions >= 3)
2377 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2378
2379 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2380 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2381 {
2382 ....
2383 } */
2384 }
2385 pIemCpu->cXcptRecursions++;
2386 pIemCpu->uCurXcpt = u8Vector;
2387 pIemCpu->fCurXcpt = fFlags;
2388
2389 /*
2390 * Extensive logging.
2391 */
2392#if defined(LOG_ENABLED) && defined(IN_RING3)
2393 if (LogIs3Enabled())
2394 {
2395 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2396 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2397 char szRegs[4096];
2398 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2399 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2400 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2401 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2402 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2403 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2404 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2405 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2406 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2407 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2408 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2409 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2410 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2411 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2412 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2413 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2414 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2415 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2416 " efer=%016VR{efer}\n"
2417 " pat=%016VR{pat}\n"
2418 " sf_mask=%016VR{sf_mask}\n"
2419 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2420 " lstar=%016VR{lstar}\n"
2421 " star=%016VR{star} cstar=%016VR{cstar}\n"
2422 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2423 );
2424
2425 char szInstr[256];
2426 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2427 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2428 szInstr, sizeof(szInstr), NULL);
2429 Log3(("%s%s\n", szRegs, szInstr));
2430 }
2431#endif /* LOG_ENABLED */
2432
2433 /*
2434 * Call the mode specific worker function.
2435 */
2436 VBOXSTRICTRC rcStrict;
2437 if (!(pCtx->cr0 & X86_CR0_PE))
2438 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2439 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2440 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2441 else if (!pCtx->eflags.Bits.u1VM)
2442 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2443 else
2444 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2445
2446 /*
2447 * Unwind.
2448 */
2449 pIemCpu->cXcptRecursions--;
2450 pIemCpu->uCurXcpt = uPrevXcpt;
2451 pIemCpu->fCurXcpt = fPrevXcpt;
2452 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
2453 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
2454 return rcStrict;
2455}
2456
2457
2458/** \#DE - 00. */
2459DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2460{
2461 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2462}
2463
2464
2465/** \#DB - 01. */
2466DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2467{
2468 /** @todo set/clear RF. */
2469 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2470}
2471
2472
2473/** \#UD - 06. */
2474DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2475{
2476 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2477}
2478
2479
2480/** \#NM - 07. */
2481DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2482{
2483 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2484}
2485
2486
2487#ifdef SOME_UNUSED_FUNCTION
2488/** \#TS(err) - 0a. */
2489DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2490{
2491 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2492}
2493#endif
2494
2495
2496/** \#TS(tr) - 0a. */
2497DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2498{
2499 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2500 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
2501}
2502
2503
2504/** \#NP(err) - 0b. */
2505DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2506{
2507 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2508}
2509
2510
2511/** \#NP(seg) - 0b. */
2512DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2513{
2514 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2515 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2516}
2517
2518
2519/** \#NP(sel) - 0b. */
2520DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2521{
2522 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2523 uSel & ~X86_SEL_RPL, 0);
2524}
2525
2526
2527/** \#SS(seg) - 0c. */
2528DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2529{
2530 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2531 uSel & ~X86_SEL_RPL, 0);
2532}
2533
2534
2535/** \#GP(n) - 0d. */
2536DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2537{
2538 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2539}
2540
2541
2542/** \#GP(0) - 0d. */
2543DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2544{
2545 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2546}
2547
2548
2549/** \#GP(sel) - 0d. */
2550DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2551{
2552 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2553 Sel & ~X86_SEL_RPL, 0);
2554}
2555
2556
2557/** \#GP(0) - 0d. */
2558DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2559{
2560 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2561}
2562
2563
2564/** \#GP(sel) - 0d. */
2565DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2566{
2567 NOREF(iSegReg); NOREF(fAccess);
2568 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2569 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2570}
2571
2572
2573/** \#GP(sel) - 0d. */
2574DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2575{
2576 NOREF(Sel);
2577 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2578}
2579
2580
2581/** \#GP(sel) - 0d. */
2582DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2583{
2584 NOREF(iSegReg); NOREF(fAccess);
2585 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2586}
2587
2588
2589/** \#PF(n) - 0e. */
2590DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2591{
2592 uint16_t uErr;
2593 switch (rc)
2594 {
2595 case VERR_PAGE_NOT_PRESENT:
2596 case VERR_PAGE_TABLE_NOT_PRESENT:
2597 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2598 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2599 uErr = 0;
2600 break;
2601
2602 default:
2603 AssertMsgFailed(("%Rrc\n", rc));
2604 case VERR_ACCESS_DENIED:
2605 uErr = X86_TRAP_PF_P;
2606 break;
2607
2608 /** @todo reserved */
2609 }
2610
2611 if (pIemCpu->uCpl == 3)
2612 uErr |= X86_TRAP_PF_US;
2613
2614 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2615 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2616 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2617 uErr |= X86_TRAP_PF_ID;
2618
2619 /* Note! RW access callers reporting a WRITE protection fault will clear
2620 the READ flag before calling. So, read-modify-write accesses (RW)
2621 can safely be reported as READ faults. */
2622 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2623 uErr |= X86_TRAP_PF_RW;
2624
2625 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2626 uErr, GCPtrWhere);
2627}
2628
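/*
 * Example of the error code assembly above: a ring-3 write that is refused
 * on an otherwise present page (VERR_ACCESS_DENIED) yields
 * uErr = X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW, whereas a ring-0
 * read of a not-present page yields uErr = 0.
 */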
2629
2630/** \#MF(0) - 10. */
2631DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2632{
2633 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2634}
2635
2636
2637/** \#AC(0) - 11. */
2638DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
2639{
2640 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2641}
2642
2643
2644/**
2645 * Macro for calling iemCImplRaiseDivideError().
2646 *
2647 * This enables us to add/remove arguments and force different levels of
2648 * inlining as we wish.
2649 *
2650 * @return Strict VBox status code.
2651 */
2652#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2653IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2654{
2655 NOREF(cbInstr);
2656 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2657}
2658
2659
2660/**
2661 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2662 *
2663 * This enables us to add/remove arguments and force different levels of
2664 * inlining as we wish.
2665 *
2666 * @return Strict VBox status code.
2667 */
2668#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2669IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2670{
2671 NOREF(cbInstr);
2672 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2673}
2674
2675
2676/**
2677 * Macro for calling iemCImplRaiseInvalidOpcode().
2678 *
2679 * This enables us to add/remove arguments and force different levels of
2680 * inlining as we wish.
2681 *
2682 * @return Strict VBox status code.
2683 */
2684#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2685IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2686{
2687 NOREF(cbInstr);
2688 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2689}
2690
2691
2692/** @} */
2693
2694
2695/*
2696 *
2697 * Helper routines.
2698 * Helper routines.
2699 * Helper routines.
2700 *
2701 */
2702
2703/**
2704 * Recalculates the effective operand size.
2705 *
2706 * @param pIemCpu The IEM state.
2707 */
2708static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2709{
2710 switch (pIemCpu->enmCpuMode)
2711 {
2712 case IEMMODE_16BIT:
2713 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2714 break;
2715 case IEMMODE_32BIT:
2716 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2717 break;
2718 case IEMMODE_64BIT:
2719 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2720 {
2721 case 0:
2722 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2723 break;
2724 case IEM_OP_PRF_SIZE_OP:
2725 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2726 break;
2727 case IEM_OP_PRF_SIZE_REX_W:
2728 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2729 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2730 break;
2731 }
2732 break;
2733 default:
2734 AssertFailed();
2735 }
2736}
2737
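/*
 * Effective operand size as recalculated above:
 *
 *      CPU mode    no prefix   0x66        REX.W       REX.W + 0x66
 *      16-bit      16-bit      32-bit      -           -
 *      32-bit      32-bit      16-bit      -           -
 *      64-bit      default     16-bit      64-bit      64-bit
 *
 * I.e. in 64-bit mode REX.W always wins over the operand size prefix, and
 * without either prefix the default size applies (32-bit for most
 * instructions, 64-bit after iemRecalEffOpSize64Default below).
 */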
2738
2739/**
2740 * Sets the default operand size to 64-bit and recalculates the effective
2741 * operand size.
2742 *
2743 * @param pIemCpu The IEM state.
2744 */
2745static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2746{
2747 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2748 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2749 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2750 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2751 else
2752 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2753}
2754
2755
2756/*
2757 *
2758 * Common opcode decoders.
2759 * Common opcode decoders.
2760 * Common opcode decoders.
2761 *
2762 */
2763//#include <iprt/mem.h>
2764
2765/**
2766 * Used to add extra details about a stub case.
2767 * @param pIemCpu The IEM per CPU state.
2768 */
2769static void iemOpStubMsg2(PIEMCPU pIemCpu)
2770{
2771#if defined(LOG_ENABLED) && defined(IN_RING3)
2772 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2773 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2774 char szRegs[4096];
2775 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2776 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2777 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2778 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2779 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2780 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2781 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2782 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2783 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2784 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2785 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2786 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2787 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2788 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2789 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2790 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2791 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2792 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2793 " efer=%016VR{efer}\n"
2794 " pat=%016VR{pat}\n"
2795 " sf_mask=%016VR{sf_mask}\n"
2796 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2797 " lstar=%016VR{lstar}\n"
2798 " star=%016VR{star} cstar=%016VR{cstar}\n"
2799 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2800 );
2801
2802 char szInstr[256];
2803 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2804 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2805 szInstr, sizeof(szInstr), NULL);
2806
2807 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2808#else
2809 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
2810#endif
2811}
2812
2813/**
2814 * Complains about a stub.
2815 *
2816 * Providing two versions of this macro, one for daily use and one for use when
2817 * working on IEM.
2818 */
2819#if 0
2820# define IEMOP_BITCH_ABOUT_STUB() \
2821 do { \
2822 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2823 iemOpStubMsg2(pIemCpu); \
2824 RTAssertPanic(); \
2825 } while (0)
2826#else
2827# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
2828#endif
2829
2830/** Stubs an opcode. */
2831#define FNIEMOP_STUB(a_Name) \
2832 FNIEMOP_DEF(a_Name) \
2833 { \
2834 IEMOP_BITCH_ABOUT_STUB(); \
2835 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2836 } \
2837 typedef int ignore_semicolon
2838
2839/** Stubs an opcode. */
2840#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2841 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2842 { \
2843 IEMOP_BITCH_ABOUT_STUB(); \
2844 NOREF(a_Name0); \
2845 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2846 } \
2847 typedef int ignore_semicolon
2848
2849/** Stubs an opcode which currently should raise \#UD. */
2850#define FNIEMOP_UD_STUB(a_Name) \
2851 FNIEMOP_DEF(a_Name) \
2852 { \
2853 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2854 return IEMOP_RAISE_INVALID_OPCODE(); \
2855 } \
2856 typedef int ignore_semicolon
2857
2858/** Stubs an opcode which currently should raise \#UD. */
2859#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
2860 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2861 { \
2862 NOREF(a_Name0); \
2863 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2864 return IEMOP_RAISE_INVALID_OPCODE(); \
2865 } \
2866 typedef int ignore_semicolon
2867
2868
2869
2870/** @name Register Access.
2871 * @{
2872 */
2873
2874/**
2875 * Gets a reference (pointer) to the specified hidden segment register.
2876 *
2877 * @returns Hidden register reference.
2878 * @param pIemCpu The per CPU data.
2879 * @param iSegReg The segment register.
2880 */
2881static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2882{
2883 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2884 PCPUMSELREG pSReg;
2885 switch (iSegReg)
2886 {
2887 case X86_SREG_ES: pSReg = &pCtx->es; break;
2888 case X86_SREG_CS: pSReg = &pCtx->cs; break;
2889 case X86_SREG_SS: pSReg = &pCtx->ss; break;
2890 case X86_SREG_DS: pSReg = &pCtx->ds; break;
2891 case X86_SREG_FS: pSReg = &pCtx->fs; break;
2892 case X86_SREG_GS: pSReg = &pCtx->gs; break;
2893 default:
2894 AssertFailedReturn(NULL);
2895 }
2896#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2897 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
2898 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
2899#else
2900 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2901#endif
2902 return pSReg;
2903}
2904
2905
2906/**
2907 * Gets a reference (pointer) to the specified segment register (the selector
2908 * value).
2909 *
2910 * @returns Pointer to the selector variable.
2911 * @param pIemCpu The per CPU data.
2912 * @param iSegReg The segment register.
2913 */
2914static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2915{
2916 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2917 switch (iSegReg)
2918 {
2919 case X86_SREG_ES: return &pCtx->es.Sel;
2920 case X86_SREG_CS: return &pCtx->cs.Sel;
2921 case X86_SREG_SS: return &pCtx->ss.Sel;
2922 case X86_SREG_DS: return &pCtx->ds.Sel;
2923 case X86_SREG_FS: return &pCtx->fs.Sel;
2924 case X86_SREG_GS: return &pCtx->gs.Sel;
2925 }
2926 AssertFailedReturn(NULL);
2927}
2928
2929
2930/**
2931 * Fetches the selector value of a segment register.
2932 *
2933 * @returns The selector value.
2934 * @param pIemCpu The per CPU data.
2935 * @param iSegReg The segment register.
2936 */
2937static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
2938{
2939 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2940 switch (iSegReg)
2941 {
2942 case X86_SREG_ES: return pCtx->es.Sel;
2943 case X86_SREG_CS: return pCtx->cs.Sel;
2944 case X86_SREG_SS: return pCtx->ss.Sel;
2945 case X86_SREG_DS: return pCtx->ds.Sel;
2946 case X86_SREG_FS: return pCtx->fs.Sel;
2947 case X86_SREG_GS: return pCtx->gs.Sel;
2948 }
2949 AssertFailedReturn(0xffff);
2950}
2951
2952
2953/**
2954 * Gets a reference (pointer) to the specified general register.
2955 *
2956 * @returns Register reference.
2957 * @param pIemCpu The per CPU data.
2958 * @param iReg The general register.
2959 */
2960static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
2961{
2962 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2963 switch (iReg)
2964 {
2965 case X86_GREG_xAX: return &pCtx->rax;
2966 case X86_GREG_xCX: return &pCtx->rcx;
2967 case X86_GREG_xDX: return &pCtx->rdx;
2968 case X86_GREG_xBX: return &pCtx->rbx;
2969 case X86_GREG_xSP: return &pCtx->rsp;
2970 case X86_GREG_xBP: return &pCtx->rbp;
2971 case X86_GREG_xSI: return &pCtx->rsi;
2972 case X86_GREG_xDI: return &pCtx->rdi;
2973 case X86_GREG_x8: return &pCtx->r8;
2974 case X86_GREG_x9: return &pCtx->r9;
2975 case X86_GREG_x10: return &pCtx->r10;
2976 case X86_GREG_x11: return &pCtx->r11;
2977 case X86_GREG_x12: return &pCtx->r12;
2978 case X86_GREG_x13: return &pCtx->r13;
2979 case X86_GREG_x14: return &pCtx->r14;
2980 case X86_GREG_x15: return &pCtx->r15;
2981 }
2982 AssertFailedReturn(NULL);
2983}
2984
2985
2986/**
2987 * Gets a reference (pointer) to the specified 8-bit general register.
2988 *
2989 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
2990 *
2991 * @returns Register reference.
2992 * @param pIemCpu The per CPU data.
2993 * @param iReg The register.
2994 */
2995static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
2996{
2997 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
2998 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
2999
3000 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
3001 if (iReg >= 4)
3002 pu8Reg++;
3003 return pu8Reg;
3004}
3005
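/*
 * Example of the mapping above: without a REX prefix, register encodings
 * 4 thru 7 are the legacy high byte registers, so iReg = 4 resolves to AH,
 * i.e. the second byte of RAX (iReg & 3 selects RAX, the +1 moves to the
 * high byte).  With any REX prefix present the same encoding selects SPL,
 * the low byte of RSP, via the plain iemGRegRef path.
 */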
3006
3007/**
3008 * Fetches the value of an 8-bit general register.
3009 *
3010 * @returns The register value.
3011 * @param pIemCpu The per CPU data.
3012 * @param iReg The register.
3013 */
3014static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
3015{
3016 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
3017 return *pbSrc;
3018}
3019
3020
3021/**
3022 * Fetches the value of a 16-bit general register.
3023 *
3024 * @returns The register value.
3025 * @param pIemCpu The per CPU data.
3026 * @param iReg The register.
3027 */
3028static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
3029{
3030 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
3031}
3032
3033
3034/**
3035 * Fetches the value of a 32-bit general register.
3036 *
3037 * @returns The register value.
3038 * @param pIemCpu The per CPU data.
3039 * @param iReg The register.
3040 */
3041static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
3042{
3043 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
3044}
3045
3046
3047/**
3048 * Fetches the value of a 64-bit general register.
3049 *
3050 * @returns The register value.
3051 * @param pIemCpu The per CPU data.
3052 * @param iReg The register.
3053 */
3054static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
3055{
3056 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
3057}
3058
3059
3060/**
3061 * Checks whether the FPU state is in FXSAVE format or not.
3062 *
3063 * @returns true if it is, false if it's in FNSAVE.
3064 * @param pIemCpu The IEM per CPU data.
3065 */
3066DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
3067{
3068#ifdef RT_ARCH_AMD64
3069 NOREF(pIemCpu);
3070 return true;
3071#else
3072 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
3073 return true;
3074#endif
3075}
3076
3077
3078/**
3079 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
3080 *
3081 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3082 * segment limit.
3083 *
3084 * @param pIemCpu The per CPU data.
3085 * @param offNextInstr The offset of the next instruction.
3086 */
3087static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
3088{
3089 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3090 switch (pIemCpu->enmEffOpSize)
3091 {
3092 case IEMMODE_16BIT:
3093 {
3094 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3095 if ( uNewIp > pCtx->cs.u32Limit
3096 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3097 return iemRaiseGeneralProtectionFault0(pIemCpu);
3098 pCtx->rip = uNewIp;
3099 break;
3100 }
3101
3102 case IEMMODE_32BIT:
3103 {
3104 Assert(pCtx->rip <= UINT32_MAX);
3105 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3106
3107 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3108 if (uNewEip > pCtx->cs.u32Limit)
3109 return iemRaiseGeneralProtectionFault0(pIemCpu);
3110 pCtx->rip = uNewEip;
3111 break;
3112 }
3113
3114 case IEMMODE_64BIT:
3115 {
3116 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3117
3118 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3119 if (!IEM_IS_CANONICAL(uNewRip))
3120 return iemRaiseGeneralProtectionFault0(pIemCpu);
3121 pCtx->rip = uNewRip;
3122 break;
3123 }
3124
3125 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3126 }
3127
3128 return VINF_SUCCESS;
3129}
3130
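/*
 * Worked example for the 16-bit case above: the classic two byte instruction
 * EB FE (JMP short -2) reaches this function with offOpcode = 2, so
 * uNewIp = ip + (-2) + 2 = ip and execution keeps spinning on the same
 * instruction, just like on real hardware.
 */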
3131
3132/**
3133 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
3134 *
3135 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3136 * segment limit.
3137 *
3138 * @returns Strict VBox status code.
3139 * @param pIemCpu The per CPU data.
3140 * @param offNextInstr The offset of the next instruction.
3141 */
3142static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
3143{
3144 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3145 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
3146
3147 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3148 if ( uNewIp > pCtx->cs.u32Limit
3149 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3150 return iemRaiseGeneralProtectionFault0(pIemCpu);
3151 /** @todo Test 16-bit jump in 64-bit mode. */
3152 pCtx->rip = uNewIp;
3153
3154 return VINF_SUCCESS;
3155}
3156
3157
3158/**
3159 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
3160 *
3161 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3162 * segment limit.
3163 *
3164 * @returns Strict VBox status code.
3165 * @param pIemCpu The per CPU data.
3166 * @param offNextInstr The offset of the next instruction.
3167 */
3168static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
3169{
3170 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3171 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
3172
3173 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
3174 {
3175 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3176
3177 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3178 if (uNewEip > pCtx->cs.u32Limit)
3179 return iemRaiseGeneralProtectionFault0(pIemCpu);
3180 pCtx->rip = uNewEip;
3181 }
3182 else
3183 {
3184 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3185
3186 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3187 if (!IEM_IS_CANONICAL(uNewRip))
3188 return iemRaiseGeneralProtectionFault0(pIemCpu);
3189 pCtx->rip = uNewRip;
3190 }
3191 return VINF_SUCCESS;
3192}
3193
3194
3195/**
3196 * Performs a near jump to the specified address.
3197 *
3198 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3199 * segment limit.
3200 *
3201 * @param pIemCpu The per CPU data.
3202 * @param uNewRip The new RIP value.
3203 */
3204static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
3205{
3206 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3207 switch (pIemCpu->enmEffOpSize)
3208 {
3209 case IEMMODE_16BIT:
3210 {
3211 Assert(uNewRip <= UINT16_MAX);
3212 if ( uNewRip > pCtx->cs.u32Limit
3213 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3214 return iemRaiseGeneralProtectionFault0(pIemCpu);
3215 /** @todo Test 16-bit jump in 64-bit mode. */
3216 pCtx->rip = uNewRip;
3217 break;
3218 }
3219
3220 case IEMMODE_32BIT:
3221 {
3222 Assert(uNewRip <= UINT32_MAX);
3223 Assert(pCtx->rip <= UINT32_MAX);
3224 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3225
3226 if (uNewRip > pCtx->cs.u32Limit)
3227 return iemRaiseGeneralProtectionFault0(pIemCpu);
3228 pCtx->rip = uNewRip;
3229 break;
3230 }
3231
3232 case IEMMODE_64BIT:
3233 {
3234 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3235
3236 if (!IEM_IS_CANONICAL(uNewRip))
3237 return iemRaiseGeneralProtectionFault0(pIemCpu);
3238 pCtx->rip = uNewRip;
3239 break;
3240 }
3241
3242 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3243 }
3244
3245 return VINF_SUCCESS;
3246}
3247
3248
3249/**
3250 * Get the address of the top of the stack.
3251 *
3252 * @param pCtx The CPU context from which SP/ESP/RSP should be
3253 * read.
3254 */
3255DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
3256{
3257 if (pCtx->ss.Attr.n.u1Long)
3258 return pCtx->rsp;
3259 if (pCtx->ss.Attr.n.u1DefBig)
3260 return pCtx->esp;
3261 return pCtx->sp;
3262}
3263
3264
3265/**
3266 * Updates the RIP/EIP/IP to point to the next instruction.
3267 *
3268 * @param pIemCpu The per CPU data.
3269 * @param cbInstr The number of bytes to add.
3270 */
3271static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
3272{
3273 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3274 switch (pIemCpu->enmCpuMode)
3275 {
3276 case IEMMODE_16BIT:
3277 Assert(pCtx->rip <= UINT16_MAX);
3278 pCtx->eip += cbInstr;
3279 pCtx->eip &= UINT32_C(0xffff);
3280 break;
3281
3282 case IEMMODE_32BIT:
3283 pCtx->eip += cbInstr;
3284 Assert(pCtx->rip <= UINT32_MAX);
3285 break;
3286
3287 case IEMMODE_64BIT:
3288 pCtx->rip += cbInstr;
3289 break;
3290 default: AssertFailed();
3291 }
3292}
3293
3294
3295/**
3296 * Updates the RIP/EIP/IP to point to the next instruction.
3297 *
3298 * @param pIemCpu The per CPU data.
3299 */
3300static void iemRegUpdateRip(PIEMCPU pIemCpu)
3301{
3302 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3303}
3304
3305
3306/**
3307 * Adds to the stack pointer.
3308 *
3309 * @param pCtx The CPU context in which SP/ESP/RSP should be
3310 * updated.
3311 * @param cbToAdd The number of bytes to add.
3312 */
3313DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
3314{
3315 if (pCtx->ss.Attr.n.u1Long)
3316 pCtx->rsp += cbToAdd;
3317 else if (pCtx->ss.Attr.n.u1DefBig)
3318 pCtx->esp += cbToAdd;
3319 else
3320 pCtx->sp += cbToAdd;
3321}
3322
3323
3324/**
3325 * Subtracts from the stack pointer.
3326 *
3327 * @param pCtx The CPU context in which SP/ESP/RSP should be
3328 * updated.
3329 * @param cbToSub The number of bytes to subtract.
3330 */
3331DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
3332{
3333 if (pCtx->ss.Attr.n.u1Long)
3334 pCtx->rsp -= cbToSub;
3335 else if (pCtx->ss.Attr.n.u1DefBig)
3336 pCtx->esp -= cbToSub;
3337 else
3338 pCtx->sp -= cbToSub;
3339}
3340
3341
3342/**
3343 * Adds to the temporary stack pointer.
3344 *
3345 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3346 * @param cbToAdd The number of bytes to add.
3347 * @param pCtx Where to get the current stack mode.
3348 */
3349DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint16_t cbToAdd, PCCPUMCTX pCtx)
3350{
3351 if (pCtx->ss.Attr.n.u1Long)
3352 pTmpRsp->u += cbToAdd;
3353 else if (pCtx->ss.Attr.n.u1DefBig)
3354 pTmpRsp->DWords.dw0 += cbToAdd;
3355 else
3356 pTmpRsp->Words.w0 += cbToAdd;
3357}
3358
3359
3360/**
3361 * Subtracts from the temporary stack pointer.
3362 *
3363 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3364 * @param cbToSub The number of bytes to subtract.
3365 * @param pCtx Where to get the current stack mode.
3366 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3367 * expecting that.
3368 */
3369DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint16_t cbToSub, PCCPUMCTX pCtx)
3370{
3371 if (pCtx->ss.Attr.n.u1Long)
3372 pTmpRsp->u -= cbToSub;
3373 else if (pCtx->ss.Attr.n.u1DefBig)
3374 pTmpRsp->DWords.dw0 -= cbToSub;
3375 else
3376 pTmpRsp->Words.w0 -= cbToSub;
3377}
3378
3379
3380/**
3381 * Calculates the effective stack address for a push of the specified size as
3382 * well as the new RSP value (upper bits may be masked).
3383 *
3384 * @returns Effective stack address for the push.
3385 * @param pCtx Where to get the current stack mode.
3386 * @param cbItem The size of the stack item to push.
3387 * @param puNewRsp Where to return the new RSP value.
3388 */
3389DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3390{
3391 RTUINT64U uTmpRsp;
3392 RTGCPTR GCPtrTop;
3393 uTmpRsp.u = pCtx->rsp;
3394
3395 if (pCtx->ss.Attr.n.u1Long)
3396 GCPtrTop = uTmpRsp.u -= cbItem;
3397 else if (pCtx->ss.Attr.n.u1DefBig)
3398 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3399 else
3400 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3401 *puNewRsp = uTmpRsp.u;
3402 return GCPtrTop;
3403}
3404
3405
3406/**
3407 * Gets the current stack pointer and calculates the value after a pop of the
3408 * specified size.
3409 *
3410 * @returns Current stack pointer.
3411 * @param pCtx Where to get the current stack mode.
3412 * @param cbItem The size of the stack item to pop.
3413 * @param puNewRsp Where to return the new RSP value.
3414 */
3415DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3416{
3417 RTUINT64U uTmpRsp;
3418 RTGCPTR GCPtrTop;
3419 uTmpRsp.u = pCtx->rsp;
3420
3421 if (pCtx->ss.Attr.n.u1Long)
3422 {
3423 GCPtrTop = uTmpRsp.u;
3424 uTmpRsp.u += cbItem;
3425 }
3426 else if (pCtx->ss.Attr.n.u1DefBig)
3427 {
3428 GCPtrTop = uTmpRsp.DWords.dw0;
3429 uTmpRsp.DWords.dw0 += cbItem;
3430 }
3431 else
3432 {
3433 GCPtrTop = uTmpRsp.Words.w0;
3434 uTmpRsp.Words.w0 += cbItem;
3435 }
3436 *puNewRsp = uTmpRsp.u;
3437 return GCPtrTop;
3438}
3439
3440
3441/**
3442 * Calculates the effective stack address for a push of the specified size as
3443 * well as the new temporary RSP value (upper bits may be masked).
3444 *
3445 * @returns Effective stack address for the push.
3446 * @param pTmpRsp The temporary stack pointer. This is updated.
3447 * @param cbItem The size of the stack item to push.
3448 * @param pCtx Where to get the current stack mode.
3449 */
3450DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3451{
3452 RTGCPTR GCPtrTop;
3453
3454 if (pCtx->ss.Attr.n.u1Long)
3455 GCPtrTop = pTmpRsp->u -= cbItem;
3456 else if (pCtx->ss.Attr.n.u1DefBig)
3457 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3458 else
3459 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3460 return GCPtrTop;
3461}
3462
3463
3464/**
3465 * Gets the effective stack address for a pop of the specified size and
3466 * calculates and updates the temporary RSP.
3467 *
3468 * @returns Current stack pointer.
3469 * @param pTmpRsp The temporary stack pointer. This is updated.
3470 * @param pCtx Where to get the current stack mode.
3471 * @param cbItem The size of the stack item to pop.
3472 */
3473DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3474{
3475 RTGCPTR GCPtrTop;
3476 if (pCtx->ss.Attr.n.u1Long)
3477 {
3478 GCPtrTop = pTmpRsp->u;
3479 pTmpRsp->u += cbItem;
3480 }
3481 else if (pCtx->ss.Attr.n.u1DefBig)
3482 {
3483 GCPtrTop = pTmpRsp->DWords.dw0;
3484 pTmpRsp->DWords.dw0 += cbItem;
3485 }
3486 else
3487 {
3488 GCPtrTop = pTmpRsp->Words.w0;
3489 pTmpRsp->Words.w0 += cbItem;
3490 }
3491 return GCPtrTop;
3492}
3493
3494
3495/**
3496 * Checks if an Intel CPUID feature bit is set.
3497 *
3498 * @returns true / false.
3499 *
3500 * @param pIemCpu The IEM per CPU data.
3501 * @param fEdx The EDX bit to test, or 0 if ECX.
3502 * @param fEcx The ECX bit to test, or 0 if EDX.
3503 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3504 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3505 */
3506static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3507{
3508 uint32_t uEax, uEbx, uEcx, uEdx;
3509 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3510 return (fEcx && (uEcx & fEcx))
3511 || (fEdx && (uEdx & fEdx));
3512}
3513
3514
3515/**
3516 * Checks if an AMD CPUID feature bit is set.
3517 *
3518 * @returns true / false.
3519 *
3520 * @param pIemCpu The IEM per CPU data.
3521 * @param fEdx The EDX bit to test, or 0 if ECX.
3522 * @param fEcx The ECX bit to test, or 0 if EDX.
3523 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3524 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3525 */
3526static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3527{
3528 uint32_t uEax, uEbx, uEcx, uEdx;
3529 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3530 return (fEcx && (uEcx & fEcx))
3531 || (fEdx && (uEdx & fEdx));
3532}
3533
3534/** @} */
3535
3536
3537/** @name FPU access and helpers.
3538 *
3539 * @{
3540 */
3541
3542
3543/**
3544 * Hook for preparing to use the host FPU.
3545 *
3546 * This is necessary in ring-0 and raw-mode context.
3547 *
3548 * @param pIemCpu The IEM per CPU data.
3549 */
3550DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3551{
3552#ifdef IN_RING3
3553 NOREF(pIemCpu);
3554#else
3555/** @todo RZ: FIXME */
3556//# error "Implement me"
3557#endif
3558}
3559
3560
3561/**
3562 * Stores a QNaN value into a FPU register.
3563 *
3564 * @param pReg Pointer to the register.
3565 */
3566DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3567{
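    /* This is the x87 "QNaN floating-point indefinite": sign bit set, exponent all ones, mantissa 0xC000000000000000. */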
3568 pReg->au32[0] = UINT32_C(0x00000000);
3569 pReg->au32[1] = UINT32_C(0xc0000000);
3570 pReg->au16[4] = UINT16_C(0xffff);
3571}
3572
3573
3574/**
3575 * Updates the FOP, FPU.CS and FPUIP registers.
3576 *
3577 * @param pIemCpu The IEM per CPU data.
3578 * @param pCtx The CPU context.
3579 */
3580DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3581{
3582 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3583 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3584 /** @todo FPU.CS and FPUIP need to be kept separately. */
3585 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3586 {
3587 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP are
3588 * handled in real mode, based on the fnsave and fnstenv images. */
3589 pCtx->fpu.CS = 0;
3590 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
3591 }
3592 else
3593 {
3594 pCtx->fpu.CS = pCtx->cs.Sel;
3595 pCtx->fpu.FPUIP = pCtx->rip;
3596 }
3597}
3598
3599
3600/**
3601 * Updates the FPU.DS and FPUDP registers.
3602 *
3603 * @param pIemCpu The IEM per CPU data.
3604 * @param pCtx The CPU context.
3605 * @param iEffSeg The effective segment register.
3606 * @param GCPtrEff The effective address relative to @a iEffSeg.
3607 */
3608DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3609{
3610 RTSEL sel;
3611 switch (iEffSeg)
3612 {
3613 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
3614 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
3615 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
3616 case X86_SREG_ES: sel = pCtx->es.Sel; break;
3617 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
3618 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
3619 default:
3620 AssertMsgFailed(("%d\n", iEffSeg));
3621 sel = pCtx->ds.Sel;
3622 }
3623 /** @todo FPU.DS and FPUDP need to be kept separately. */
3624 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3625 {
3626 pCtx->fpu.DS = 0;
3627 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
3628 }
3629 else
3630 {
3631 pCtx->fpu.DS = sel;
3632 pCtx->fpu.FPUDP = GCPtrEff;
3633 }
3634}
3635
3636
3637/**
3638 * Rotates the stack registers in the push direction.
3639 *
3640 * @param pCtx The CPU context.
3641 * @remarks This is a complete waste of time, but fxsave stores the registers in
3642 * stack order.
3643 */
3644DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
3645{
3646 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
3647 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
3648 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
3649 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
3650 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
3651 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
3652 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
3653 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
3654 pCtx->fpu.aRegs[0].r80 = r80Tmp;
3655}
3656
3657
3658/**
3659 * Rotates the stack registers in the pop direction.
3660 *
3661 * @param pCtx The CPU context.
3662 * @remarks This is a complete waste of time, but fxsave stores the registers in
3663 * stack order.
3664 */
3665DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
3666{
3667 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
3668 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
3669 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
3670 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
3671 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
3672 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
3673 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
3674 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
3675 pCtx->fpu.aRegs[7].r80 = r80Tmp;
3676}
3677
3678
3679/**
3680 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
3681 * exception prevents it.
3682 *
3683 * @param pIemCpu The IEM per CPU data.
3684 * @param pResult The FPU operation result to push.
3685 * @param pCtx The CPU context.
3686 */
3687static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
3688{
3689 /* Update FSW and bail if there are pending exceptions afterwards. */
3690 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3691 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3692 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3693 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3694 {
3695 pCtx->fpu.FSW = fFsw;
3696 return;
3697 }
3698
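    /* A push decrements TOP; adding 7 and masking is the same as subtracting 1 modulo the 3-bit TOP field. */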
3699 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3700 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3701 {
3702 /* All is fine, push the actual value. */
3703 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3704 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3705 }
3706 else if (pCtx->fpu.FCW & X86_FCW_IM)
3707 {
3708 /* Masked stack overflow, push QNaN. */
3709 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3710 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3711 }
3712 else
3713 {
3714 /* Raise stack overflow, don't push anything. */
3715 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3716 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3717 return;
3718 }
3719
3720 fFsw &= ~X86_FSW_TOP_MASK;
3721 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3722 pCtx->fpu.FSW = fFsw;
3723
3724 iemFpuRotateStackPush(pCtx);
3725}
3726
3727
3728/**
3729 * Stores a result in a FPU register and updates the FSW and FTW.
3730 *
3731 * @param pIemCpu The IEM per CPU data.
3732 * @param pResult The result to store.
3733 * @param iStReg Which FPU register to store it in.
3734 * @param pCtx The CPU context.
3735 */
3736static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
3737{
3738 Assert(iStReg < 8);
3739 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3740 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3741 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
3742 pCtx->fpu.FTW |= RT_BIT(iReg);
3743 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
3744}
3745
3746
3747/**
3748 * Only updates the FPU status word (FSW) with the result of the current
3749 * instruction.
3750 *
3751 * @param pCtx The CPU context.
3752 * @param u16FSW The FSW output of the current instruction.
3753 */
3754static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
3755{
3756 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3757 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
3758}
3759
3760
3761/**
3762 * Pops one item off the FPU stack if no pending exception prevents it.
3763 *
3764 * @param pCtx The CPU context.
3765 */
3766static void iemFpuMaybePopOne(PCPUMCTX pCtx)
3767{
3768 /* Check pending exceptions. */
3769 uint16_t uFSW = pCtx->fpu.FSW;
3770 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3771 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3772 return;
3773
3774 /* TOP++: popping increments TOP (adding 9 is +1 modulo the 3-bit field). */
3775 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
3776 uFSW &= ~X86_FSW_TOP_MASK;
3777 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3778 pCtx->fpu.FSW = uFSW;
3779
3780 /* Mark the previous ST0 as empty. */
3781 iOldTop >>= X86_FSW_TOP_SHIFT;
3782 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
3783
3784 /* Rotate the registers. */
3785 iemFpuRotateStackPop(pCtx);
3786}
3787
3788
3789/**
3790 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
3791 *
3792 * @param pIemCpu The IEM per CPU data.
3793 * @param pResult The FPU operation result to push.
3794 */
3795static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
3796{
3797 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3798 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3799 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3800}
3801
3802
3803/**
3804 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
3805 * and sets FPUDP and FPUDS.
3806 *
3807 * @param pIemCpu The IEM per CPU data.
3808 * @param pResult The FPU operation result to push.
3809 * @param iEffSeg The effective segment register.
3810 * @param GCPtrEff The effective address relative to @a iEffSeg.
3811 */
3812static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3813{
3814 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3815 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3816 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3817 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3818}
3819
3820
3821/**
3822 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
3823 * unless a pending exception prevents it.
3824 *
3825 * @param pIemCpu The IEM per CPU data.
3826 * @param pResult The FPU operation result to store and push.
3827 */
3828static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
3829{
3830 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3831 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3832
3833 /* Update FSW and bail if there are pending exceptions afterwards. */
3834 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3835 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3836 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3837 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3838 {
3839 pCtx->fpu.FSW = fFsw;
3840 return;
3841 }
3842
3843 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3844 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3845 {
3846 /* All is fine, push the actual value. */
3847 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3848 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
3849 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
3850 }
3851 else if (pCtx->fpu.FCW & X86_FCW_IM)
3852 {
3853 /* Masked stack overflow, push QNaN. */
3854 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3855 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
3856 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3857 }
3858 else
3859 {
3860 /* Raise stack overflow, don't push anything. */
3861 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3862 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3863 return;
3864 }
3865
3866 fFsw &= ~X86_FSW_TOP_MASK;
3867 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3868 pCtx->fpu.FSW = fFsw;
3869
3870 iemFpuRotateStackPush(pCtx);
3871}
3872
3873
3874/**
3875 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3876 * FOP.
3877 *
3878 * @param pIemCpu The IEM per CPU data.
3879 * @param pResult The result to store.
3880 * @param iStReg Which FPU register to store it in.
3882 */
3883static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3884{
3885 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3886 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3887 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3888}
3889
3890
3891/**
3892 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3893 * FOP, and then pops the stack.
3894 *
3895 * @param pIemCpu The IEM per CPU data.
3896 * @param pResult The result to store.
3897 * @param iStReg Which FPU register to store it in.
3899 */
3900static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3901{
3902 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3903 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3904 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3905 iemFpuMaybePopOne(pCtx);
3906}
3907
3908
3909/**
3910 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3911 * FPUDP, and FPUDS.
3912 *
3913 * @param pIemCpu The IEM per CPU data.
3914 * @param pResult The result to store.
3915 * @param iStReg Which FPU register to store it in.
3917 * @param iEffSeg The effective memory operand selector register.
3918 * @param GCPtrEff The effective memory operand offset.
3919 */
3920static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3921{
3922 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3923 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3924 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3925 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3926}
3927
3928
3929/**
3930 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3931 * FPUDP, and FPUDS, and then pops the stack.
3932 *
3933 * @param pIemCpu The IEM per CPU data.
3934 * @param pResult The result to store.
3935 * @param iStReg Which FPU register to store it in.
3937 * @param iEffSeg The effective memory operand selector register.
3938 * @param GCPtrEff The effective memory operand offset.
3939 */
3940static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
3941 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3942{
3943 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3944 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3945 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3946 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3947 iemFpuMaybePopOne(pCtx);
3948}
3949
3950
3951/**
3952 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
3953 *
3954 * @param pIemCpu The IEM per CPU data.
3955 */
3956static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
3957{
3958 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
3959}
3960
3961
3962/**
3963 * Marks the specified stack register as free (for FFREE).
3964 *
3965 * @param pIemCpu The IEM per CPU data.
3966 * @param iStReg The register to free.
3967 */
3968static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
3969{
3970 Assert(iStReg < 8);
3971 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3972 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3973 pCtx->fpu.FTW &= ~RT_BIT(iReg);
3974}
3975
3976
3977/**
3978 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
3979 *
3980 * @param pIemCpu The IEM per CPU data.
3981 */
3982static void iemFpuStackIncTop(PIEMCPU pIemCpu)
3983{
3984 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3985 uint16_t uFsw = pCtx->fpu.FSW;
3986 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
3987 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3988 uFsw &= ~X86_FSW_TOP_MASK;
3989 uFsw |= uTop;
3990 pCtx->fpu.FSW = uFsw;
3991}
3992
3993
3994/**
3995 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
3996 *
3997 * @param pIemCpu The IEM per CPU data.
3998 */
3999static void iemFpuStackDecTop(PIEMCPU pIemCpu)
4000{
4001 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4002 uint16_t uFsw = pCtx->fpu.FSW;
4003 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4004 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4005 uFsw &= ~X86_FSW_TOP_MASK;
4006 uFsw |= uTop;
4007 pCtx->fpu.FSW = uFsw;
4008}
4009
4010
4011/**
4012 * Updates the FSW, FOP, FPUIP, and FPUCS.
4013 *
4014 * @param pIemCpu The IEM per CPU data.
4015 * @param u16FSW The FSW from the current instruction.
4016 */
4017static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
4018{
4019 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4020 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4021 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4022}
4023
4024
4025/**
4026 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4027 *
4028 * @param pIemCpu The IEM per CPU data.
4029 * @param u16FSW The FSW from the current instruction.
4030 */
4031static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4032{
4033 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4034 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4035 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4036 iemFpuMaybePopOne(pCtx);
4037}
4038
4039
4040/**
4041 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4042 *
4043 * @param pIemCpu The IEM per CPU data.
4044 * @param u16FSW The FSW from the current instruction.
4045 * @param iEffSeg The effective memory operand selector register.
4046 * @param GCPtrEff The effective memory operand offset.
4047 */
4048static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4049{
4050 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4051 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4052 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4053 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4054}
4055
4056
4057/**
4058 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4059 *
4060 * @param pIemCpu The IEM per CPU data.
4061 * @param u16FSW The FSW from the current instruction.
4062 */
4063static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4064{
4065 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4066 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4067 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4068 iemFpuMaybePopOne(pCtx);
4069 iemFpuMaybePopOne(pCtx);
4070}
4071
4072
4073/**
4074 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4075 *
4076 * @param pIemCpu The IEM per CPU data.
4077 * @param u16FSW The FSW from the current instruction.
4078 * @param iEffSeg The effective memory operand selector register.
4079 * @param GCPtrEff The effective memory operand offset.
4080 */
4081static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4082{
4083 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4084 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4085 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4086 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4087 iemFpuMaybePopOne(pCtx);
4088}
4089
4090
4091/**
4092 * Worker routine for raising an FPU stack underflow exception.
4093 *
4094 * @param pIemCpu The IEM per CPU data.
4095 * @param iStReg The stack register being accessed.
4096 * @param pCtx The CPU context.
4097 */
4098static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
4099{
4100 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4101 if (pCtx->fpu.FCW & X86_FCW_IM)
4102 {
4103 /* Masked underflow. */
4104 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4105 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4106 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4107 if (iStReg != UINT8_MAX)
4108 {
4109 pCtx->fpu.FTW |= RT_BIT(iReg);
4110 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4111 }
4112 }
4113 else
4114 {
4115 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4116 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4117 }
4118}
4119
4120
4121/**
4122 * Raises a FPU stack underflow exception.
4123 *
4124 * @param pIemCpu The IEM per CPU data.
4125 * @param iStReg The destination register that should be loaded
4126 * with QNaN if \#IS is masked. Specify
4127 * UINT8_MAX if none (like for fcom).
4128 */
4129DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
4130{
4131 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4132 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4133 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4134}
4135
4136
4137DECL_NO_INLINE(static, void)
4138iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4139{
4140 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4141 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4142 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4143 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4144}
4145
4146
4147DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
4148{
4149 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4150 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4151 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4152 iemFpuMaybePopOne(pCtx);
4153}
4154
4155
4156DECL_NO_INLINE(static, void)
4157iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4158{
4159 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4160 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4161 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4162 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4163 iemFpuMaybePopOne(pCtx);
4164}
4165
4166
4167DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
4168{
4169 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4170 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4171 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
4172 iemFpuMaybePopOne(pCtx);
4173 iemFpuMaybePopOne(pCtx);
4174}
4175
4176
4177DECL_NO_INLINE(static, void)
4178iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
4179{
4180 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4181 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4182
4183 if (pCtx->fpu.FCW & X86_FCW_IM)
4184 {
4185 /* Masked underflow - push QNaN. */
4186 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4187 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4188 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4189 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4190 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4191 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4192 iemFpuRotateStackPush(pCtx);
4193 }
4194 else
4195 {
4196 /* Exception pending - don't change TOP or the register stack. */
4197 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4198 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4199 }
4200}
4201
4202
4203DECL_NO_INLINE(static, void)
4204iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
4205{
4206 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4207 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4208
4209 if (pCtx->fpu.FCW & X86_FCW_IM)
4210 {
4211 /* Masked underflow - push QNaN. */
4212 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4213 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4214 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4215 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4216 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4217 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4218 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4219 iemFpuRotateStackPush(pCtx);
4220 }
4221 else
4222 {
4223 /* Exception pending - don't change TOP or the register stack. */
4224 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4225 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4226 }
4227}
4228
4229
4230/**
4231 * Worker routine for raising an FPU stack overflow exception on a push.
4232 *
4233 * @param pIemCpu The IEM per CPU data.
4234 * @param pCtx The CPU context.
4235 */
4236static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4237{
4238 if (pCtx->fpu.FCW & X86_FCW_IM)
4239 {
4240 /* Masked overflow. */
4241 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4242 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4243 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4244 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4245 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4246 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4247 iemFpuRotateStackPush(pCtx);
4248 }
4249 else
4250 {
4251 /* Exception pending - don't change TOP or the register stack. */
4252 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4253 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4254 }
4255}
4256
4257
4258/**
4259 * Raises a FPU stack overflow exception on a push.
4260 *
4261 * @param pIemCpu The IEM per CPU data.
4262 */
4263DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4264{
4265 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4266 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4267 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4268}
4269
4270
4271/**
4272 * Raises a FPU stack overflow exception on a push with a memory operand.
4273 *
4274 * @param pIemCpu The IEM per CPU data.
4275 * @param iEffSeg The effective memory operand selector register.
4276 * @param GCPtrEff The effective memory operand offset.
4277 */
4278DECL_NO_INLINE(static, void)
4279iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4280{
4281 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4282 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4283 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4284 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4285}
4286
4287
4288static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4289{
4290 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4291 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4292 if (pCtx->fpu.FTW & RT_BIT(iReg))
4293 return VINF_SUCCESS;
4294 return VERR_NOT_FOUND;
4295}
4296
4297
4298static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4299{
4300 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4301 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4302 if (pCtx->fpu.FTW & RT_BIT(iReg))
4303 {
4304 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4305 return VINF_SUCCESS;
4306 }
4307 return VERR_NOT_FOUND;
4308}
4309
4310
4311static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4312 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
4313{
4314 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4315 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4316 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4317 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4318 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4319 {
4320 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4321 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
4322 return VINF_SUCCESS;
4323 }
4324 return VERR_NOT_FOUND;
4325}
4326
4327
4328static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
4329{
4330 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4331 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4332 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4333 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4334 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4335 {
4336 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4337 return VINF_SUCCESS;
4338 }
4339 return VERR_NOT_FOUND;
4340}
4341
4342
4343/**
4344 * Updates the FPU exception status after FCW is changed.
4345 *
4346 * @param pCtx The CPU context.
4347 */
4348static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
4349{
4350 uint16_t u16Fsw = pCtx->fpu.FSW;
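    /* ES (error summary) and B (which current CPUs keep in sync with ES) are set while any unmasked exception is pending. */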
4351 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
4352 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4353 else
4354 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4355 pCtx->fpu.FSW = u16Fsw;
4356}
4357
4358
4359/**
4360 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4361 *
4362 * @returns The full FTW.
4363 * @param pCtx The CPU state.
4364 */
4365static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
4366{
4367 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
4368 uint16_t u16Ftw = 0;
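    /* Full tag word encoding: two bits per register - 0=valid, 1=zero, 2=special (NaN, infinity, denormal, unnormal), 3=empty. */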
4369 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4370 for (unsigned iSt = 0; iSt < 8; iSt++)
4371 {
4372 unsigned const iReg = (iSt + iTop) & 7;
4373 if (!(u8Ftw & RT_BIT(iReg)))
4374 u16Ftw |= 3 << (iReg * 2); /* empty */
4375 else
4376 {
4377 uint16_t uTag;
4378 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
4379 if (pr80Reg->s.uExponent == 0x7fff)
4380 uTag = 2; /* Exponent is all 1's => Special. */
4381 else if (pr80Reg->s.uExponent == 0x0000)
4382 {
4383 if (pr80Reg->s.u64Mantissa == 0x0000)
4384 uTag = 1; /* All bits are zero => Zero. */
4385 else
4386 uTag = 2; /* Must be special. */
4387 }
4388 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
4389 uTag = 0; /* Valid. */
4390 else
4391 uTag = 2; /* Must be special. */
4392
4393 u16Ftw |= uTag << (iReg * 2);
4394 }
4395 }
4396
4397 return u16Ftw;
4398}
4399
4400
4401/**
4402 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4403 *
4404 * @returns The compressed FTW.
4405 * @param u16FullFtw The full FTW to convert.
4406 */
4407static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
4408{
4409 uint8_t u8Ftw = 0;
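    /* The compressed form has one bit per register: set unless the two full-FTW bits read 3 (empty). */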
4410 for (unsigned i = 0; i < 8; i++)
4411 {
4412 if ((u16FullFtw & 3) != 3 /*empty*/)
4413 u8Ftw |= RT_BIT(i);
4414 u16FullFtw >>= 2;
4415 }
4416
4417 return u8Ftw;
4418}
4419
4420/** @} */
4421
4422
4423/** @name Memory access.
4424 *
4425 * @{
4426 */
4427
4428
4429/**
4430 * Checks if the given segment can be written to, raising the appropriate
4431 * exception if not.
4432 *
4433 * @returns VBox strict status code.
4434 *
4435 * @param pIemCpu The IEM per CPU data.
4436 * @param pHid Pointer to the hidden register.
4437 * @param iSegReg The register number.
4438 */
4439static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4440{
4441 if (!pHid->Attr.n.u1Present)
4442 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4443
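    /* Writes require a writable data segment; in 64-bit mode these attribute checks do not apply. */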
4444 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4445 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4446 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4447 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
4448
4449 /** @todo DPL/RPL/CPL? */
4450
4451 return VINF_SUCCESS;
4452}
4453
4454
4455/**
4456 * Checks if the given segment can be read from, raising the appropriate
4457 * exception if not.
4458 *
4459 * @returns VBox strict status code.
4460 *
4461 * @param pIemCpu The IEM per CPU data.
4462 * @param pHid Pointer to the hidden register.
4463 * @param iSegReg The register number.
4464 */
4465static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4466{
4467 if (!pHid->Attr.n.u1Present)
4468 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4469
4470 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
4471 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4472 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
4473
4474 /** @todo DPL/RPL/CPL? */
4475
4476 return VINF_SUCCESS;
4477}
4478
4479
4480/**
4481 * Applies the segment limit, base and attributes.
4482 *
4483 * This may raise a \#GP or \#SS.
4484 *
4485 * @returns VBox strict status code.
4486 *
4487 * @param pIemCpu The IEM per CPU data.
4488 * @param fAccess The kind of access which is being performed.
4489 * @param iSegReg The index of the segment register to apply.
4490 * This is UINT8_MAX if none (for IDT, GDT, LDT,
4491 * TSS, ++).
 * @param cbMem The number of bytes being accessed.
4492 * @param pGCPtrMem Pointer to the guest memory address to apply
4493 * segmentation to. Input and output parameter.
4494 */
4495static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
4496 size_t cbMem, PRTGCPTR pGCPtrMem)
4497{
4498 if (iSegReg == UINT8_MAX)
4499 return VINF_SUCCESS;
4500
4501 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
4502 switch (pIemCpu->enmCpuMode)
4503 {
4504 case IEMMODE_16BIT:
4505 case IEMMODE_32BIT:
4506 {
4507 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
4508 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
4509
4510 Assert(pSel->Attr.n.u1Present);
4511 Assert(pSel->Attr.n.u1DescType);
4512 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
4513 {
4514 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4515 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4516 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4517
4518 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4519 {
4520 /** @todo CPL check. */
4521 }
4522
4523 /*
4524 * There are two kinds of data selectors, normal and expand down.
4525 */
4526 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
4527 {
4528 if ( GCPtrFirst32 > pSel->u32Limit
4529 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4530 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4531
4532 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4533 }
4534 else
4535 {
4536 /** @todo implement expand down segments. */
4537 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n"));
4538 }
4539 }
4540 else
4541 {
4542
4543 /*
4544 * A code selector can usually be used to read through; writing is
4545 * only permitted in real and V8086 mode.
4546 */
4547 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4548 || ( (fAccess & IEM_ACCESS_TYPE_READ)
4549 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
4550 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
4551 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4552
4553 if ( GCPtrFirst32 > pSel->u32Limit
4554 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4555 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4556
4557 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4558 {
4559 /** @todo CPL check. */
4560 }
4561
4562 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4563 }
4564 return VINF_SUCCESS;
4565 }
4566
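        /* In 64-bit mode segmentation is mostly disabled: only the FS and GS bases are applied and no limit checking is done here. */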
4567 case IEMMODE_64BIT:
4568 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
4569 *pGCPtrMem += pSel->u64Base;
4570 return VINF_SUCCESS;
4571
4572 default:
4573 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
4574 }
4575}
4576
4577
4578/**
4579 * Translates a virtual address to a physical address and checks if we
4580 * can access the page as specified.
4581 *
4582 * @param pIemCpu The IEM per CPU data.
4583 * @param GCPtrMem The virtual address.
4584 * @param fAccess The intended access.
4585 * @param pGCPhysMem Where to return the physical address.
4586 */
4587static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
4588 PRTGCPHYS pGCPhysMem)
4589{
4590 /** @todo Need a different PGM interface here. We're currently using
4591 * generic / REM interfaces. This won't cut it for R0 & RC. */
4592 RTGCPHYS GCPhys;
4593 uint64_t fFlags;
4594 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
4595 if (RT_FAILURE(rc))
4596 {
4597 /** @todo Check unassigned memory in unpaged mode. */
4598 /** @todo Reserved bits in page tables. Requires new PGM interface. */
4599 *pGCPhysMem = NIL_RTGCPHYS;
4600 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
4601 }
4602
4603 /* If the page is writable and does not have the no-exec bit set, all
4604 access is allowed. Otherwise we'll have to check more carefully... */
4605 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
4606 {
4607 /* Write to read only memory? */
4608 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4609 && !(fFlags & X86_PTE_RW)
4610 && ( pIemCpu->uCpl != 0
4611 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
4612 {
4613 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
4614 *pGCPhysMem = NIL_RTGCPHYS;
4615 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
4616 }
4617
4618 /* Kernel memory accessed by userland? */
4619 if ( !(fFlags & X86_PTE_US)
4620 && pIemCpu->uCpl == 3
4621 && !(fAccess & IEM_ACCESS_WHAT_SYS))
4622 {
4623 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
4624 *pGCPhysMem = NIL_RTGCPHYS;
4625 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
4626 }
4627
4628 /* Executing non-executable memory? */
4629 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
4630 && (fFlags & X86_PTE_PAE_NX)
4631 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
4632 {
4633 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
4634 *pGCPhysMem = NIL_RTGCPHYS;
4635 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
4636 VERR_ACCESS_DENIED);
4637 }
4638 }
4639
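    /* Combine the physical page frame with the offset into the page. */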
4640 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
4641 *pGCPhysMem = GCPhys;
4642 return VINF_SUCCESS;
4643}
4644
4645
4646
4647/**
4648 * Maps a physical page.
4649 *
4650 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
4651 * @param pIemCpu The IEM per CPU data.
4652 * @param GCPhysMem The physical address.
4653 * @param fAccess The intended access.
4654 * @param ppvMem Where to return the mapping address.
4655 * @param pLock The PGM lock.
4656 */
4657static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
4658{
4659#ifdef IEM_VERIFICATION_MODE
4660 /* Force the alternative path so we can ignore writes. */
4661 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
4662 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4663#endif
4664
4665 /** @todo This API may require some improving later. A private deal with PGM
4666 * regarding locking and unlocking needs to be struck. A couple of TLBs
4667 * living in PGM, but with publicly accessible inlined access methods
4668 * could perhaps be an even better solution. */
4669 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
4670 GCPhysMem,
4671 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
4672 pIemCpu->fByPassHandlers,
4673 ppvMem,
4674 pLock);
4675 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
4676 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
4677 return rc;
4678}
4679
4680
4681/**
4682 * Unmaps a page previously mapped by iemMemPageMap.
4683 *
4684 * @param pIemCpu The IEM per CPU data.
4685 * @param GCPhysMem The physical address.
4686 * @param fAccess The intended access.
4687 * @param pvMem What iemMemPageMap returned.
4688 * @param pLock The PGM lock.
4689 */
4690DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
4691{
4692 NOREF(pIemCpu);
4693 NOREF(GCPhysMem);
4694 NOREF(fAccess);
4695 NOREF(pvMem);
4696 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
4697}
4698
4699
4700/**
4701 * Looks up a memory mapping entry.
4702 *
4703 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
4704 * @param pIemCpu The IEM per CPU data.
4705 * @param pvMem The memory address.
4706 * @param fAccess The kind of access the mapping was made for.
4707 */
4708DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4709{
4710 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
4711 if ( pIemCpu->aMemMappings[0].pv == pvMem
4712 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4713 return 0;
4714 if ( pIemCpu->aMemMappings[1].pv == pvMem
4715 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4716 return 1;
4717 if ( pIemCpu->aMemMappings[2].pv == pvMem
4718 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4719 return 2;
4720 return VERR_NOT_FOUND;
4721}
4722
4723
4724/**
4725 * Finds a free memmap entry when using iNextMapping doesn't work.
4726 *
4727 * @returns Memory mapping index, 1024 on failure.
4728 * @param pIemCpu The IEM per CPU data.
4729 */
4730static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
4731{
4732 /*
4733 * The easy case.
4734 */
4735 if (pIemCpu->cActiveMappings == 0)
4736 {
4737 pIemCpu->iNextMapping = 1;
4738 return 0;
4739 }
4740
4741 /* There should be enough mappings for all instructions. */
4742 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
4743
4744 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
4745 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
4746 return i;
4747
4748 AssertFailedReturn(1024);
4749}
4750
4751
4752/**
4753 * Commits a bounce buffer that needs writing back and unmaps it.
4754 *
4755 * @returns Strict VBox status code.
4756 * @param pIemCpu The IEM per CPU data.
4757 * @param iMemMap The index of the buffer to commit.
4758 */
4759static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
4760{
4761 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
4762 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
4763
4764 /*
4765 * Do the writing.
4766 */
4767 int rc;
4768 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
4769 && !IEM_VERIFICATION_ENABLED(pIemCpu))
4770 {
4771 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4772 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4773 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4774 if (!pIemCpu->fByPassHandlers)
4775 {
4776 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4777 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4778 pbBuf,
4779 cbFirst);
4780 if (cbSecond && rc == VINF_SUCCESS)
4781 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4782 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4783 pbBuf + cbFirst,
4784 cbSecond);
4785 }
4786 else
4787 {
4788 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4789 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4790 pbBuf,
4791 cbFirst);
4792 if (cbSecond && rc == VINF_SUCCESS)
4793 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4794 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4795 pbBuf + cbFirst,
4796 cbSecond);
4797 }
4798 if (rc != VINF_SUCCESS)
4799 {
4800 /** @todo status code handling */
4801 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
4802 pIemCpu->fByPassHandlers ? "PGMPhysWrite" : "PGMPhysSimpleWriteGCPhys",
4803 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
4804 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
4805 }
4806 }
4807 else
4808 rc = VINF_SUCCESS;
4809
4810#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
4811 /*
4812 * Record the write(s).
4813 */
4814 if (!pIemCpu->fNoRem)
4815 {
4816 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4817 if (pEvtRec)
4818 {
4819 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4820 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
4821 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4822 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
4823 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
4824 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4825 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4826 }
4827 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
4828 {
4829 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4830 if (pEvtRec)
4831 {
4832 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4833 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
4834 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4835 memcpy(pEvtRec->u.RamWrite.ab,
4836 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
4837 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
4838 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4839 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4840 }
4841 }
4842 }
4843#endif
4844
4845 /*
4846 * Free the mapping entry.
4847 */
4848 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4849 Assert(pIemCpu->cActiveMappings != 0);
4850 pIemCpu->cActiveMappings--;
4851 return rc;
4852}
4853
4854
4855/**
4856 * iemMemMap worker that deals with a request crossing pages.
4857 */
4858static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
4859 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
4860{
4861 /*
4862 * Do the address translations.
4863 */
4864 RTGCPHYS GCPhysFirst;
4865 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
4866 if (rcStrict != VINF_SUCCESS)
4867 return rcStrict;
4868
4869 RTGCPHYS GCPhysSecond;
4870 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
4871 if (rcStrict != VINF_SUCCESS)
4872 return rcStrict;
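    /* The second translation was done for the last byte of the access; the copy itself continues from the start of the second page. */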
4873 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
4874
4875 /*
4876 * Read in the current memory content if it's a read, execute or partial
4877 * write access.
4878 */
4879 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4880 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
4881 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
4882
4883 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
4884 {
4885 int rc;
4886 if (!pIemCpu->fByPassHandlers)
4887 {
4888 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
4889 if (rc != VINF_SUCCESS)
4890 {
4891 /** @todo status code handling */
4892 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
4893 return rc;
4894 }
4895 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
4896 if (rc != VINF_SUCCESS)
4897 {
4898 /** @todo status code handling */
4899 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
4900 return rc;
4901 }
4902 }
4903 else
4904 {
4905 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
4906 if (rc != VINF_SUCCESS)
4907 {
4908 /** @todo status code handling */
4909 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
4910 return rc;
4911 }
4912 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
4913 if (rc != VINF_SUCCESS)
4914 {
4915 /** @todo status code handling */
4916 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
4917 return rc;
4918 }
4919 }
4920
4921#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
4922 if ( !pIemCpu->fNoRem
4923 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
4924 {
4925 /*
4926 * Record the reads.
4927 */
4928 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4929 if (pEvtRec)
4930 {
4931 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4932 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
4933 pEvtRec->u.RamRead.cb = cbFirstPage;
4934 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4935 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4936 }
4937 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4938 if (pEvtRec)
4939 {
4940 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4941 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
4942 pEvtRec->u.RamRead.cb = cbSecondPage;
4943 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4944 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4945 }
4946 }
4947#endif
4948 }
4949#ifdef VBOX_STRICT
4950 else
4951 memset(pbBuf, 0xcc, cbMem);
4952#endif
4953#ifdef VBOX_STRICT
4954 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
4955 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
4956#endif
4957
4958 /*
4959 * Commit the bounce buffer entry.
4960 */
4961 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
4962 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
4963 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
4964 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
4965 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
4966 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
4967 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
4968 pIemCpu->cActiveMappings++;
4969
4970 *ppvMem = pbBuf;
4971 return VINF_SUCCESS;
4972}
4973
4974
4975/**
4976 * iemMemMap worker that deals with iemMemPageMap failures.
4977 */
4978static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
4979 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
4980{
4981 /*
4982 * Filter out conditions we can handle and the ones which shouldn't happen.
4983 */
4984 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
4985 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
4986 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
4987 {
4988 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
4989 return rcMap;
4990 }
4991 pIemCpu->cPotentialExits++;
4992
4993 /*
4994 * Read in the current memory content if it's a read, execute or partial
4995 * write access.
4996 */
4997 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4998 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
4999 {
5000 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5001 memset(pbBuf, 0xff, cbMem);
5002 else
5003 {
5004 int rc;
5005 if (!pIemCpu->fByPassHandlers)
5006 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
5007 else
5008 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
5009 if (rc != VINF_SUCCESS)
5010 {
5011 /** @todo status code handling */
5012 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
5013 pIemCpu->fByPassHandlers ? "PGMPhysRead" : "PGMPhysSimpleReadGCPhys", GCPhysFirst, rc));
5014 return rc;
5015 }
5016 }
5017
5018#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
5019 if ( !pIemCpu->fNoRem
5020 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5021 {
5022 /*
5023 * Record the read.
5024 */
5025 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5026 if (pEvtRec)
5027 {
5028 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5029 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5030 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
5031 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5032 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5033 }
5034 }
5035#endif
5036 }
5037#ifdef VBOX_STRICT
5038 else
5039 memset(pbBuf, 0xcc, cbMem);
5040#endif
5041#ifdef VBOX_STRICT
5042 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5043 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5044#endif
5045
5046 /*
5047 * Commit the bounce buffer entry.
5048 */
5049 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5050 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5051 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5052 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
5053 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5054 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5055 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5056 pIemCpu->cActiveMappings++;
5057
5058 *ppvMem = pbBuf;
5059 return VINF_SUCCESS;
5060}
5061
5062
5063
5064/**
5065 * Maps the specified guest memory for the given kind of access.
5066 *
5067 * This may be using bounce buffering of the memory if it's crossing a page
5068 * boundary or if there is an access handler installed for any of it. Because
5069 * of lock prefix guarantees, we're in for some extra clutter when this
5070 * happens.
5071 *
5072 * This may raise a \#GP, \#SS, \#PF or \#AC.
5073 *
5074 * @returns VBox strict status code.
5075 *
5076 * @param pIemCpu The IEM per CPU data.
5077 * @param ppvMem Where to return the pointer to the mapped
5078 * memory.
5079 * @param cbMem The number of bytes to map. This is usually 1,
5080 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5081 * string operations it can be up to a page.
5082 * @param iSegReg The index of the segment register to use for
5083 * this access. The base and limits are checked.
5084 * Use UINT8_MAX to indicate that no segmentation
5085 * is required (for IDT, GDT and LDT accesses).
5086 * @param GCPtrMem The address of the guest memory.
5087 * @param   fAccess             How the memory is being accessed.  The
5088 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5089 * how to map the memory, while the
5090 * IEM_ACCESS_WHAT_XXX bit is used when raising
5091 * exceptions.
5092 */
5093static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
5094{
5095 /*
5096 * Check the input and figure out which mapping entry to use.
5097 */
5098 Assert(cbMem <= 32 || cbMem == 512 || cbMem == 108 || cbMem == 94);
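    /* Note: the 94 and 108 byte sizes correspond to the 16-bit and 32-bit
       fsave/frstor images and 512 to fxsave/fxrstor; everything else fits
       within 32 bytes. */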
5099    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5100
5101 unsigned iMemMap = pIemCpu->iNextMapping;
5102 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
5103 {
5104 iMemMap = iemMemMapFindFree(pIemCpu);
5105 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
5106 }
5107
5108 /*
5109 * Map the memory, checking that we can actually access it. If something
5110 * slightly complicated happens, fall back on bounce buffering.
5111 */
5112 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5113 if (rcStrict != VINF_SUCCESS)
5114 return rcStrict;
5115
5116 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
5117 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5118
5119 RTGCPHYS GCPhysFirst;
5120 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
5121 if (rcStrict != VINF_SUCCESS)
5122 return rcStrict;
5123
5124 void *pvMem;
5125 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5126 if (rcStrict != VINF_SUCCESS)
5127 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5128
5129 /*
5130 * Fill in the mapping table entry.
5131 */
5132 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
5133 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
5134 pIemCpu->iNextMapping = iMemMap + 1;
5135 pIemCpu->cActiveMappings++;
5136
5137 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5138 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5139 pIemCpu->cbWritten += (uint32_t)cbMem;
5140 *ppvMem = pvMem;
5141 return VINF_SUCCESS;
5142}
5143
5144
5145/**
5146 * Commits the guest memory if bounce buffered and unmaps it.
5147 *
5148 * @returns Strict VBox status code.
5149 * @param pIemCpu The IEM per CPU data.
5150 * @param pvMem The mapping.
5151 * @param fAccess The kind of access.
5152 */
5153static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5154{
5155 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
5156 AssertReturn(iMemMap >= 0, iMemMap);
5157
5158 /* If it's bounce buffered, we may need to write back the buffer. */
5159 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
5160 {
5161 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
5162 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
5163 }
5164 /* Otherwise unlock it. */
5165 else
5166 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5167
5168 /* Free the entry. */
5169 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5170 Assert(pIemCpu->cActiveMappings != 0);
5171 pIemCpu->cActiveMappings--;
5172 return VINF_SUCCESS;
5173}
5174
5175
5176/**
5177 * Fetches a data byte.
5178 *
5179 * @returns Strict VBox status code.
5180 * @param pIemCpu The IEM per CPU data.
5181 * @param pu8Dst Where to return the byte.
5182 * @param iSegReg The index of the segment register to use for
5183 * this access. The base and limits are checked.
5184 * @param GCPtrMem The address of the guest memory.
5185 */
5186static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5187{
5188 /* The lazy approach for now... */
5189 uint8_t const *pu8Src;
5190 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5191 if (rc == VINF_SUCCESS)
5192 {
5193 *pu8Dst = *pu8Src;
5194 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5195 }
5196 return rc;
5197}
5198
5199
5200/**
5201 * Fetches a data word.
5202 *
5203 * @returns Strict VBox status code.
5204 * @param pIemCpu The IEM per CPU data.
5205 * @param pu16Dst Where to return the word.
5206 * @param iSegReg The index of the segment register to use for
5207 * this access. The base and limits are checked.
5208 * @param GCPtrMem The address of the guest memory.
5209 */
5210static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5211{
5212 /* The lazy approach for now... */
5213 uint16_t const *pu16Src;
5214 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5215 if (rc == VINF_SUCCESS)
5216 {
5217 *pu16Dst = *pu16Src;
5218 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
5219 }
5220 return rc;
5221}
5222
5223
5224/**
5225 * Fetches a data dword.
5226 *
5227 * @returns Strict VBox status code.
5228 * @param pIemCpu The IEM per CPU data.
5229 * @param pu32Dst Where to return the dword.
5230 * @param iSegReg The index of the segment register to use for
5231 * this access. The base and limits are checked.
5232 * @param GCPtrMem The address of the guest memory.
5233 */
5234static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5235{
5236 /* The lazy approach for now... */
5237 uint32_t const *pu32Src;
5238 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5239 if (rc == VINF_SUCCESS)
5240 {
5241 *pu32Dst = *pu32Src;
5242 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
5243 }
5244 return rc;
5245}
5246
5247
5248#ifdef SOME_UNUSED_FUNCTION
5249/**
5250 * Fetches a data dword and sign extends it to a qword.
5251 *
5252 * @returns Strict VBox status code.
5253 * @param pIemCpu The IEM per CPU data.
5254 * @param pu64Dst Where to return the sign extended value.
5255 * @param iSegReg The index of the segment register to use for
5256 * this access. The base and limits are checked.
5257 * @param GCPtrMem The address of the guest memory.
5258 */
5259static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5260{
5261 /* The lazy approach for now... */
5262 int32_t const *pi32Src;
5263 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5264 if (rc == VINF_SUCCESS)
5265 {
5266 *pu64Dst = *pi32Src;
5267 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
5268 }
5269#ifdef __GNUC__ /* warning: GCC may be a royal pain */
5270 else
5271 *pu64Dst = 0;
5272#endif
5273 return rc;
5274}
5275#endif
5276
5277
5278/**
5279 * Fetches a data qword.
5280 *
5281 * @returns Strict VBox status code.
5282 * @param pIemCpu The IEM per CPU data.
5283 * @param pu64Dst Where to return the qword.
5284 * @param iSegReg The index of the segment register to use for
5285 * this access. The base and limits are checked.
5286 * @param GCPtrMem The address of the guest memory.
5287 */
5288static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5289{
5290 /* The lazy approach for now... */
5291 uint64_t const *pu64Src;
5292 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5293 if (rc == VINF_SUCCESS)
5294 {
5295 *pu64Dst = *pu64Src;
5296 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5297 }
5298 return rc;
5299}
5300
5301
5302/**
5303 * Fetches a data tword.
5304 *
5305 * @returns Strict VBox status code.
5306 * @param pIemCpu The IEM per CPU data.
5307 * @param pr80Dst Where to return the tword.
5308 * @param iSegReg The index of the segment register to use for
5309 * this access. The base and limits are checked.
5310 * @param GCPtrMem The address of the guest memory.
5311 */
5312static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5313{
5314 /* The lazy approach for now... */
5315 PCRTFLOAT80U pr80Src;
5316 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5317 if (rc == VINF_SUCCESS)
5318 {
5319 *pr80Dst = *pr80Src;
5320 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
5321 }
5322 return rc;
5323}
5324
5325
5326/**
5327 * Fetches a descriptor register (lgdt, lidt).
5328 *
5329 * @returns Strict VBox status code.
5330 * @param pIemCpu The IEM per CPU data.
5331 * @param pcbLimit Where to return the limit.
5332 * @param   pGCPtrBase          Where to return the base.
5333 * @param iSegReg The index of the segment register to use for
5334 * this access. The base and limits are checked.
5335 * @param GCPtrMem The address of the guest memory.
5336 * @param enmOpSize The effective operand size.
5337 */
5338static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
5339 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5340{
5341 uint8_t const *pu8Src;
5342 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5343 (void **)&pu8Src,
5344 enmOpSize == IEMMODE_64BIT
5345 ? 2 + 8
5346 : enmOpSize == IEMMODE_32BIT
5347 ? 2 + 4
5348 : 2 + 3,
5349 iSegReg,
5350 GCPtrMem,
5351 IEM_ACCESS_DATA_R);
5352 if (rcStrict == VINF_SUCCESS)
5353 {
5354 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
5355 switch (enmOpSize)
5356 {
5357 case IEMMODE_16BIT:
5358 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
5359 break;
5360 case IEMMODE_32BIT:
5361 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
5362 break;
5363 case IEMMODE_64BIT:
5364 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
5365 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
5366 break;
5367
5368 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5369 }
5370 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5371 }
5372 return rcStrict;
5373}
5374
5375
5376
5377/**
5378 * Stores a data byte.
5379 *
5380 * @returns Strict VBox status code.
5381 * @param pIemCpu The IEM per CPU data.
5382 * @param iSegReg The index of the segment register to use for
5383 * this access. The base and limits are checked.
5384 * @param GCPtrMem The address of the guest memory.
5385 * @param u8Value The value to store.
5386 */
5387static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
5388{
5389 /* The lazy approach for now... */
5390 uint8_t *pu8Dst;
5391 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5392 if (rc == VINF_SUCCESS)
5393 {
5394 *pu8Dst = u8Value;
5395 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
5396 }
5397 return rc;
5398}
5399
5400
5401/**
5402 * Stores a data word.
5403 *
5404 * @returns Strict VBox status code.
5405 * @param pIemCpu The IEM per CPU data.
5406 * @param iSegReg The index of the segment register to use for
5407 * this access. The base and limits are checked.
5408 * @param GCPtrMem The address of the guest memory.
5409 * @param u16Value The value to store.
5410 */
5411static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
5412{
5413 /* The lazy approach for now... */
5414 uint16_t *pu16Dst;
5415 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5416 if (rc == VINF_SUCCESS)
5417 {
5418 *pu16Dst = u16Value;
5419 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
5420 }
5421 return rc;
5422}
5423
5424
5425/**
5426 * Stores a data dword.
5427 *
5428 * @returns Strict VBox status code.
5429 * @param pIemCpu The IEM per CPU data.
5430 * @param iSegReg The index of the segment register to use for
5431 * this access. The base and limits are checked.
5432 * @param GCPtrMem The address of the guest memory.
5433 * @param u32Value The value to store.
5434 */
5435static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
5436{
5437 /* The lazy approach for now... */
5438 uint32_t *pu32Dst;
5439 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5440 if (rc == VINF_SUCCESS)
5441 {
5442 *pu32Dst = u32Value;
5443 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
5444 }
5445 return rc;
5446}
5447
5448
5449/**
5450 * Stores a data qword.
5451 *
5452 * @returns Strict VBox status code.
5453 * @param pIemCpu The IEM per CPU data.
5454 * @param iSegReg The index of the segment register to use for
5455 * this access. The base and limits are checked.
5456 * @param GCPtrMem The address of the guest memory.
5457 * @param u64Value The value to store.
5458 */
5459static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
5460{
5461 /* The lazy approach for now... */
5462 uint64_t *pu64Dst;
5463 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5464 if (rc == VINF_SUCCESS)
5465 {
5466 *pu64Dst = u64Value;
5467 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
5468 }
5469 return rc;
5470}
5471
5472
5473/**
5474 * Stores a descriptor register (sgdt, sidt).
5475 *
5476 * @returns Strict VBox status code.
5477 * @param pIemCpu The IEM per CPU data.
5478 * @param cbLimit The limit.
5479 * @param   GCPtrBase           The base address.
5480 * @param iSegReg The index of the segment register to use for
5481 * this access. The base and limits are checked.
5482 * @param GCPtrMem The address of the guest memory.
5483 * @param enmOpSize The effective operand size.
5484 */
5485static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
5486 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5487{
5488 uint8_t *pu8Src;
5489 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5490 (void **)&pu8Src,
5491 enmOpSize == IEMMODE_64BIT
5492 ? 2 + 8
5493 : enmOpSize == IEMMODE_32BIT
5494 ? 2 + 4
5495                                                  : 2 + 4, /* 6 bytes are stored below even for 16-bit operand size */
5496 iSegReg,
5497 GCPtrMem,
5498 IEM_ACCESS_DATA_W);
5499 if (rcStrict == VINF_SUCCESS)
5500 {
5501 pu8Src[0] = RT_BYTE1(cbLimit);
5502 pu8Src[1] = RT_BYTE2(cbLimit);
5503 pu8Src[2] = RT_BYTE1(GCPtrBase);
5504 pu8Src[3] = RT_BYTE2(GCPtrBase);
5505 pu8Src[4] = RT_BYTE3(GCPtrBase);
5506 if (enmOpSize == IEMMODE_16BIT)
5507 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
5508 else
5509 {
5510 pu8Src[5] = RT_BYTE4(GCPtrBase);
5511 if (enmOpSize == IEMMODE_64BIT)
5512 {
5513 pu8Src[6] = RT_BYTE5(GCPtrBase);
5514 pu8Src[7] = RT_BYTE6(GCPtrBase);
5515 pu8Src[8] = RT_BYTE7(GCPtrBase);
5516 pu8Src[9] = RT_BYTE8(GCPtrBase);
5517 }
5518 }
5519 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
5520 }
5521 return rcStrict;
5522}
5523
5524
5525/**
5526 * Pushes a word onto the stack.
5527 *
5528 * @returns Strict VBox status code.
5529 * @param pIemCpu The IEM per CPU data.
5530 * @param u16Value The value to push.
5531 */
5532static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
5533{
5534    /* Decrement the stack pointer. */
5535 uint64_t uNewRsp;
5536 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5537 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
5538
5539 /* Write the word the lazy way. */
5540 uint16_t *pu16Dst;
5541 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5542 if (rc == VINF_SUCCESS)
5543 {
5544 *pu16Dst = u16Value;
5545 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5546 }
5547
5548    /* Commit the new RSP value unless an access handler made trouble. */
5549 if (rc == VINF_SUCCESS)
5550 pCtx->rsp = uNewRsp;
5551
5552 return rc;
5553}
5554
5555
5556/**
5557 * Pushes a dword onto the stack.
5558 *
5559 * @returns Strict VBox status code.
5560 * @param pIemCpu The IEM per CPU data.
5561 * @param u32Value The value to push.
5562 */
5563static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
5564{
5565    /* Decrement the stack pointer. */
5566 uint64_t uNewRsp;
5567 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5568 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
5569
5570    /* Write the dword the lazy way. */
5571 uint32_t *pu32Dst;
5572 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5573 if (rc == VINF_SUCCESS)
5574 {
5575 *pu32Dst = u32Value;
5576 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5577 }
5578
5579    /* Commit the new RSP value unless an access handler made trouble. */
5580 if (rc == VINF_SUCCESS)
5581 pCtx->rsp = uNewRsp;
5582
5583 return rc;
5584}
5585
5586
5587/**
5588 * Pushes a qword onto the stack.
5589 *
5590 * @returns Strict VBox status code.
5591 * @param pIemCpu The IEM per CPU data.
5592 * @param u64Value The value to push.
5593 */
5594static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
5595{
5596    /* Decrement the stack pointer. */
5597 uint64_t uNewRsp;
5598 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5599 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
5600
5601    /* Write the qword the lazy way. */
5602 uint64_t *pu64Dst;
5603 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5604 if (rc == VINF_SUCCESS)
5605 {
5606 *pu64Dst = u64Value;
5607 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5608 }
5609
5610    /* Commit the new RSP value unless an access handler made trouble. */
5611 if (rc == VINF_SUCCESS)
5612 pCtx->rsp = uNewRsp;
5613
5614 return rc;
5615}
5616
5617
5618/**
5619 * Pops a word from the stack.
5620 *
5621 * @returns Strict VBox status code.
5622 * @param pIemCpu The IEM per CPU data.
5623 * @param pu16Value Where to store the popped value.
5624 */
5625static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
5626{
5627 /* Increment the stack pointer. */
5628 uint64_t uNewRsp;
5629 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5630 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
5631
5632    /* Read the word the lazy way. */
5633 uint16_t const *pu16Src;
5634 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5635 if (rc == VINF_SUCCESS)
5636 {
5637 *pu16Value = *pu16Src;
5638 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5639
5640 /* Commit the new RSP value. */
5641 if (rc == VINF_SUCCESS)
5642 pCtx->rsp = uNewRsp;
5643 }
5644
5645 return rc;
5646}
5647
5648
5649/**
5650 * Pops a dword from the stack.
5651 *
5652 * @returns Strict VBox status code.
5653 * @param pIemCpu The IEM per CPU data.
5654 * @param pu32Value Where to store the popped value.
5655 */
5656static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
5657{
5658 /* Increment the stack pointer. */
5659 uint64_t uNewRsp;
5660 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5661 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
5662
5663    /* Read the dword the lazy way. */
5664 uint32_t const *pu32Src;
5665 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5666 if (rc == VINF_SUCCESS)
5667 {
5668 *pu32Value = *pu32Src;
5669 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5670
5671 /* Commit the new RSP value. */
5672 if (rc == VINF_SUCCESS)
5673 pCtx->rsp = uNewRsp;
5674 }
5675
5676 return rc;
5677}
5678
5679
5680/**
5681 * Pops a qword from the stack.
5682 *
5683 * @returns Strict VBox status code.
5684 * @param pIemCpu The IEM per CPU data.
5685 * @param pu64Value Where to store the popped value.
5686 */
5687static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
5688{
5689 /* Increment the stack pointer. */
5690 uint64_t uNewRsp;
5691 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5692 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
5693
5694    /* Read the qword the lazy way. */
5695 uint64_t const *pu64Src;
5696 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5697 if (rc == VINF_SUCCESS)
5698 {
5699 *pu64Value = *pu64Src;
5700 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5701
5702 /* Commit the new RSP value. */
5703 if (rc == VINF_SUCCESS)
5704 pCtx->rsp = uNewRsp;
5705 }
5706
5707 return rc;
5708}
5709
5710
5711/**
5712 * Pushes a word onto the stack, using a temporary stack pointer.
5713 *
5714 * @returns Strict VBox status code.
5715 * @param pIemCpu The IEM per CPU data.
5716 * @param u16Value The value to push.
5717 * @param pTmpRsp Pointer to the temporary stack pointer.
5718 */
5719static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
5720{
5721    /* Decrement the stack pointer. */
5722 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5723 RTUINT64U NewRsp = *pTmpRsp;
5724 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
5725
5726 /* Write the word the lazy way. */
5727 uint16_t *pu16Dst;
5728 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5729 if (rc == VINF_SUCCESS)
5730 {
5731 *pu16Dst = u16Value;
5732 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5733 }
5734
5735    /* Commit the new RSP value unless an access handler made trouble. */
5736 if (rc == VINF_SUCCESS)
5737 *pTmpRsp = NewRsp;
5738
5739 return rc;
5740}
5741
5742
5743/**
5744 * Pushes a dword onto the stack, using a temporary stack pointer.
5745 *
5746 * @returns Strict VBox status code.
5747 * @param pIemCpu The IEM per CPU data.
5748 * @param u32Value The value to push.
5749 * @param pTmpRsp Pointer to the temporary stack pointer.
5750 */
5751static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
5752{
5753    /* Decrement the stack pointer. */
5754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5755 RTUINT64U NewRsp = *pTmpRsp;
5756 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
5757
5758    /* Write the dword the lazy way. */
5759 uint32_t *pu32Dst;
5760 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5761 if (rc == VINF_SUCCESS)
5762 {
5763 *pu32Dst = u32Value;
5764 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5765 }
5766
5767    /* Commit the new RSP value unless an access handler made trouble. */
5768 if (rc == VINF_SUCCESS)
5769 *pTmpRsp = NewRsp;
5770
5771 return rc;
5772}
5773
5774
5775/**
5776 * Pushes a qword onto the stack, using a temporary stack pointer.
5777 *
5778 * @returns Strict VBox status code.
5779 * @param pIemCpu The IEM per CPU data.
5780 * @param u64Value The value to push.
5781 * @param pTmpRsp Pointer to the temporary stack pointer.
5782 */
5783static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
5784{
5785    /* Decrement the stack pointer. */
5786 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5787 RTUINT64U NewRsp = *pTmpRsp;
5788 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
5789
5790    /* Write the qword the lazy way. */
5791 uint64_t *pu64Dst;
5792 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5793 if (rc == VINF_SUCCESS)
5794 {
5795 *pu64Dst = u64Value;
5796 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5797 }
5798
5799    /* Commit the new RSP value unless an access handler made trouble. */
5800 if (rc == VINF_SUCCESS)
5801 *pTmpRsp = NewRsp;
5802
5803 return rc;
5804}
5805
5806
5807/**
5808 * Pops a word from the stack, using a temporary stack pointer.
5809 *
5810 * @returns Strict VBox status code.
5811 * @param pIemCpu The IEM per CPU data.
5812 * @param pu16Value Where to store the popped value.
5813 * @param pTmpRsp Pointer to the temporary stack pointer.
5814 */
5815static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
5816{
5817 /* Increment the stack pointer. */
5818 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5819 RTUINT64U NewRsp = *pTmpRsp;
5820 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
5821
5822    /* Read the word the lazy way. */
5823 uint16_t const *pu16Src;
5824 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5825 if (rc == VINF_SUCCESS)
5826 {
5827 *pu16Value = *pu16Src;
5828 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5829
5830 /* Commit the new RSP value. */
5831 if (rc == VINF_SUCCESS)
5832 *pTmpRsp = NewRsp;
5833 }
5834
5835 return rc;
5836}
5837
5838
5839/**
5840 * Pops a dword from the stack, using a temporary stack pointer.
5841 *
5842 * @returns Strict VBox status code.
5843 * @param pIemCpu The IEM per CPU data.
5844 * @param pu32Value Where to store the popped value.
5845 * @param pTmpRsp Pointer to the temporary stack pointer.
5846 */
5847static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
5848{
5849 /* Increment the stack pointer. */
5850 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5851 RTUINT64U NewRsp = *pTmpRsp;
5852 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
5853
5854    /* Read the dword the lazy way. */
5855 uint32_t const *pu32Src;
5856 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5857 if (rc == VINF_SUCCESS)
5858 {
5859 *pu32Value = *pu32Src;
5860 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5861
5862 /* Commit the new RSP value. */
5863 if (rc == VINF_SUCCESS)
5864 *pTmpRsp = NewRsp;
5865 }
5866
5867 return rc;
5868}
5869
5870
5871/**
5872 * Pops a qword from the stack, using a temporary stack pointer.
5873 *
5874 * @returns Strict VBox status code.
5875 * @param pIemCpu The IEM per CPU data.
5876 * @param pu64Value Where to store the popped value.
5877 * @param pTmpRsp Pointer to the temporary stack pointer.
5878 */
5879static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
5880{
5881 /* Increment the stack pointer. */
5882 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5883 RTUINT64U NewRsp = *pTmpRsp;
5884 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
5885
5886    /* Read the qword the lazy way. */
5887 uint64_t const *pu64Src;
5888 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5889 if (rcStrict == VINF_SUCCESS)
5890 {
5891 *pu64Value = *pu64Src;
5892 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5893
5894 /* Commit the new RSP value. */
5895 if (rcStrict == VINF_SUCCESS)
5896 *pTmpRsp = NewRsp;
5897 }
5898
5899 return rcStrict;
5900}
5901
5902
5903/**
5904 * Begin a special stack push (used by interrupts, exceptions and such).
5905 *
5906 * This will raise \#SS or \#PF if appropriate.
5907 *
5908 * @returns Strict VBox status code.
5909 * @param pIemCpu The IEM per CPU data.
5910 * @param cbMem The number of bytes to push onto the stack.
5911 * @param ppvMem Where to return the pointer to the stack memory.
5912 * As with the other memory functions this could be
5913 * direct access or bounce buffered access, so
5914 *                      don't commit the register until the commit call
5915 * succeeds.
5916 * @param puNewRsp Where to return the new RSP value. This must be
5917 * passed unchanged to
5918 * iemMemStackPushCommitSpecial().
5919 */
5920static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
5921{
5922 Assert(cbMem < UINT8_MAX);
5923 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5924 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
5925 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5926}
5927
5928
5929/**
5930 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
5931 *
5932 * This will update the rSP.
5933 *
5934 * @returns Strict VBox status code.
5935 * @param pIemCpu The IEM per CPU data.
5936 * @param pvMem The pointer returned by
5937 * iemMemStackPushBeginSpecial().
5938 * @param uNewRsp The new RSP value returned by
5939 * iemMemStackPushBeginSpecial().
5940 */
5941static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
5942{
5943 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
5944 if (rcStrict == VINF_SUCCESS)
5945 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
5946 return rcStrict;
5947}
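
/* A rough usage sketch (illustrative only, not lifted from this file): exception
 * dispatching code would pair the two functions above along these lines:
 *
 *      uint64_t  uNewRsp;
 *      uint16_t *pu16Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu16Frame[0] = ...; pu16Frame[1] = ...; pu16Frame[2] = ...;
 *      rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 *
 * RSP is only updated by the commit call, so a failed write leaves the guest
 * state untouched.
 */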
5948
5949
5950/**
5951 * Begin a special stack pop (used by iret, retf and such).
5952 *
5953 * This will raise \#SS or \#PF if appropriate.
5954 *
5955 * @returns Strict VBox status code.
5956 * @param pIemCpu The IEM per CPU data.
5957 * @param   cbMem               The number of bytes to pop off the stack.
5958 * @param ppvMem Where to return the pointer to the stack memory.
5959 * @param puNewRsp Where to return the new RSP value. This must be
5960 * passed unchanged to
5961 * iemMemStackPopCommitSpecial() or applied
5962 * manually if iemMemStackPopDoneSpecial() is used.
5963 */
5964static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
5965{
5966 Assert(cbMem < UINT8_MAX);
5967 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5968 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
5969 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5970}
5971
5972
5973/**
5974 * Continue a special stack pop (used by iret and retf).
5975 *
5976 * This will raise \#SS or \#PF if appropriate.
5977 *
5978 * @returns Strict VBox status code.
5979 * @param pIemCpu The IEM per CPU data.
5980 * @param   cbMem               The number of bytes to pop off the stack.
5981 * @param ppvMem Where to return the pointer to the stack memory.
5982 * @param puNewRsp Where to return the new RSP value. This must be
5983 * passed unchanged to
5984 * iemMemStackPopCommitSpecial() or applied
5985 * manually if iemMemStackPopDoneSpecial() is used.
5986 */
5987static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
5988{
5989 Assert(cbMem < UINT8_MAX);
5990 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5991 RTUINT64U NewRsp;
5992 NewRsp.u = *puNewRsp;
5993 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
5994 *puNewRsp = NewRsp.u;
5995 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5996}
5997
5998
5999/**
6000 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
6001 *
6002 * This will update the rSP.
6003 *
6004 * @returns Strict VBox status code.
6005 * @param pIemCpu The IEM per CPU data.
6006 * @param pvMem The pointer returned by
6007 * iemMemStackPopBeginSpecial().
6008 * @param uNewRsp The new RSP value returned by
6009 * iemMemStackPopBeginSpecial().
6010 */
6011static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
6012{
6013 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6014 if (rcStrict == VINF_SUCCESS)
6015 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6016 return rcStrict;
6017}
6018
6019
6020/**
6021 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
6022 * iemMemStackPopContinueSpecial).
6023 *
6024 * The caller will manually commit the rSP.
6025 *
6026 * @returns Strict VBox status code.
6027 * @param pIemCpu The IEM per CPU data.
6028 * @param pvMem The pointer returned by
6029 * iemMemStackPopBeginSpecial() or
6030 * iemMemStackPopContinueSpecial().
6031 */
6032static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
6033{
6034 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6035}
6036
6037
6038/**
6039 * Fetches a system table dword.
6040 *
6041 * @returns Strict VBox status code.
6042 * @param pIemCpu The IEM per CPU data.
6043 * @param pu32Dst Where to return the dword.
6044 * @param iSegReg The index of the segment register to use for
6045 * this access. The base and limits are checked.
6046 * @param GCPtrMem The address of the guest memory.
6047 */
6048static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6049{
6050 /* The lazy approach for now... */
6051 uint32_t const *pu32Src;
6052 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6053 if (rc == VINF_SUCCESS)
6054 {
6055 *pu32Dst = *pu32Src;
6056 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
6057 }
6058 return rc;
6059}
6060
6061
6062/**
6063 * Fetches a system table qword.
6064 *
6065 * @returns Strict VBox status code.
6066 * @param pIemCpu The IEM per CPU data.
6067 * @param pu64Dst Where to return the qword.
6068 * @param iSegReg The index of the segment register to use for
6069 * this access. The base and limits are checked.
6070 * @param GCPtrMem The address of the guest memory.
6071 */
6072static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6073{
6074 /* The lazy approach for now... */
6075 uint64_t const *pu64Src;
6076 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6077 if (rc == VINF_SUCCESS)
6078 {
6079 *pu64Dst = *pu64Src;
6080 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
6081 }
6082 return rc;
6083}
6084
6085
6086/**
6087 * Fetches a descriptor table entry.
6088 *
6089 * @returns Strict VBox status code.
6090 * @param pIemCpu The IEM per CPU.
6091 * @param pDesc Where to return the descriptor table entry.
6092 * @param   uSel                The selector whose table entry to fetch.
6093 */
6094static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
6095{
6096 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6097
6098 /** @todo did the 286 require all 8 bytes to be accessible? */
6099 /*
6100 * Get the selector table base and check bounds.
6101 */
6102 RTGCPTR GCPtrBase;
6103 if (uSel & X86_SEL_LDT)
6104 {
6105 if ( !pCtx->ldtr.Attr.n.u1Present
6106 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
6107 {
6108 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
6109 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
6110 /** @todo is this the right exception? */
6111 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6112 }
6113
6114 Assert(pCtx->ldtr.Attr.n.u1Present);
6115 GCPtrBase = pCtx->ldtr.u64Base;
6116 }
6117 else
6118 {
6119 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
6120 {
6121 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
6122 /** @todo is this the right exception? */
6123 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6124 }
6125 GCPtrBase = pCtx->gdtr.pGdt;
6126 }
6127
6128 /*
6129 * Read the legacy descriptor and maybe the long mode extensions if
6130 * required.
6131 */
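    /* (In long mode, system descriptors such as LDT and TSS descriptors are 16
        bytes wide, which is why a second qword may be fetched below.) */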
6132 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
6133 if (rcStrict == VINF_SUCCESS)
6134 {
6135 if ( !IEM_IS_LONG_MODE(pIemCpu)
6136 || pDesc->Legacy.Gen.u1DescType)
6137 pDesc->Long.au64[1] = 0;
6138 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
6139 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
6140 else
6141 {
6142 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
6143 /** @todo is this the right exception? */
6144 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6145 }
6146 }
6147 return rcStrict;
6148}
6149
6150
6151/**
6152 * Fakes a long mode stack selector for SS = 0.
6153 *
6154 * @param pDescSs Where to return the fake stack descriptor.
6155 * @param uDpl The DPL we want.
6156 */
6157static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
6158{
6159 pDescSs->Long.au64[0] = 0;
6160 pDescSs->Long.au64[1] = 0;
6161 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
6162 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
6163 pDescSs->Long.Gen.u2Dpl = uDpl;
6164 pDescSs->Long.Gen.u1Present = 1;
6165 pDescSs->Long.Gen.u1Long = 1;
6166}
6167
6168
6169/**
6170 * Marks the selector descriptor as accessed (only non-system descriptors).
6171 *
6172 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
6173 * will therefore skip the limit checks.
6174 *
6175 * @returns Strict VBox status code.
6176 * @param pIemCpu The IEM per CPU.
6177 * @param uSel The selector.
6178 */
6179static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
6180{
6181 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6182
6183 /*
6184 * Get the selector table base and calculate the entry address.
6185 */
6186 RTGCPTR GCPtr = uSel & X86_SEL_LDT
6187 ? pCtx->ldtr.u64Base
6188 : pCtx->gdtr.pGdt;
6189 GCPtr += uSel & X86_SEL_MASK;
6190
6191 /*
6192 * ASMAtomicBitSet will assert if the address is misaligned, so do some
6193     * ugly stuff to avoid this.  This also makes sure the access is atomic
6194     * and more or less removes any question about 8-bit or 32-bit accesses.
6195 */
6196 VBOXSTRICTRC rcStrict;
6197 uint32_t volatile *pu32;
6198 if ((GCPtr & 3) == 0)
6199 {
6200        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
6201 GCPtr += 2 + 2;
6202 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6203 if (rcStrict != VINF_SUCCESS)
6204 return rcStrict;
6205        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
6206 }
6207 else
6208 {
6209 /* The misaligned GDT/LDT case, map the whole thing. */
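        /* The accessed flag is bit 40 of the descriptor, i.e. bit 0 of byte 5.
           The switch below picks a 32-bit aligned base near pu32 and a matching
           bit index so ASMAtomicBitSet always operates on an aligned dword. */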
6210 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6211 if (rcStrict != VINF_SUCCESS)
6212 return rcStrict;
6213 switch ((uintptr_t)pu32 & 3)
6214 {
6215 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
6216 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
6217 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
6218 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
6219 }
6220 }
6221
6222 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
6223}
6224
6225/** @} */
6226
6227
6228/*
6229 * Include the C/C++ implementation of the instructions.
6230 */
6231#include "IEMAllCImpl.cpp.h"
6232
6233
6234
6235/** @name "Microcode" macros.
6236 *
6237 * The idea is that we should be able to use the same code to interpret
6238 * instructions as well as recompiler instructions. Thus this obfuscation.
6239 *
6240 * @{
6241 */
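
/* A rough sketch (not taken from this file) of how a decoder function elsewhere
 * might string these macros together, e.g. for a 16-bit register-to-register move:
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * Interpreted here, the macros expand to plain C statements; a recompiler could
 * give the very same sequence a different expansion.
 */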
6242#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
6243#define IEM_MC_END() }
6244#define IEM_MC_PAUSE() do {} while (0)
6245#define IEM_MC_CONTINUE() do {} while (0)
6246
6247/** Internal macro. */
6248#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
6249 do \
6250 { \
6251 VBOXSTRICTRC rcStrict2 = a_Expr; \
6252 if (rcStrict2 != VINF_SUCCESS) \
6253 return rcStrict2; \
6254 } while (0)
6255
6256#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
6257#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
6258#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
6259#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
6260#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
6261#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
6262#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
6263
6264#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
6265#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
6266 do { \
6267 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
6268 return iemRaiseDeviceNotAvailable(pIemCpu); \
6269 } while (0)
6270#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
6271 do { \
6272 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
6273 return iemRaiseMathFault(pIemCpu); \
6274 } while (0)
6275#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
6276 do { \
6277 if (pIemCpu->uCpl != 0) \
6278 return iemRaiseGeneralProtectionFault0(pIemCpu); \
6279 } while (0)
6280
6281
6282#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
6283#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
6284#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
6285#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
6286#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
6287#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
6288#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
6289 uint32_t a_Name; \
6290 uint32_t *a_pName = &a_Name
6291#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
6292 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
6293
6294#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
6295#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
6296
6297#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6298#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6299#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6300#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6301#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6302#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6303#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6304#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6305#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6306#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6307#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6308#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6309#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6310#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6311#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
6312#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
6313#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
6314#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6315#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6316#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6317#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6318#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6319#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
6320#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6321#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6322#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6323#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6324#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6325#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6326#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6327#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6328#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
6329#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
6330
6331#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
6332#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
6333#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
6334#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
6335#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
6336#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
6337#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
6338#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
6339#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
6340#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
6341#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
6342 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
6343
6344#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
6345#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
6346/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
6347 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
6348#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
6349#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
6350#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6351
6352#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
6353#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
6354#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
6355 do { \
6356 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6357 *pu32Reg += (a_u32Value); \
6358        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6359 } while (0)
6360#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
6361
6362#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
6363#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
6364#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
6365 do { \
6366 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6367 *pu32Reg -= (a_u32Value); \
6368        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6369 } while (0)
6370#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
6371
6372#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
6373#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
6374#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
6375#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
6376#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
6377#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
6378#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
6379
6380#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
6381#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
6382#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6383#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
6384
6385#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
6386#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
6387#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
6388
6389#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
6390#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6391
6392#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
6393#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
6394#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
6395
6396#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
6397#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
6398#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
6399
6400#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6401
6402#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6403
6404#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
6405#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
6406#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
6407 do { \
6408 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6409 *pu32Reg &= (a_u32Value); \
6410        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6411 } while (0)
6412#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
6413
6414#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
6415#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
6416#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
6417 do { \
6418 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6419 *pu32Reg |= (a_u32Value); \
6420        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6421 } while (0)
6422#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
6423
6424
6425#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
6426#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
6427#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
6428
6429#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
6430
6431
6432#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
6433 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
6434#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
6435 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
6436#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
6437 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
6438
6439#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6440 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
6441#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6442 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6443#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
6444 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
6445
6446#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6447 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
6448#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6449 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6450#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
6451 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
6452
6453#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6454 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6455
6456#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6457 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6458#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6459 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6460
6461#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
6462 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
6463#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
6464 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
6465#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
6466 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
6467
6468
6469#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6470 do { \
6471 uint8_t u8Tmp; \
6472 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6473 (a_u16Dst) = u8Tmp; \
6474 } while (0)
6475#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6476 do { \
6477 uint8_t u8Tmp; \
6478 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6479 (a_u32Dst) = u8Tmp; \
6480 } while (0)
6481#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6482 do { \
6483 uint8_t u8Tmp; \
6484 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6485 (a_u64Dst) = u8Tmp; \
6486 } while (0)
6487#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6488 do { \
6489 uint16_t u16Tmp; \
6490 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6491 (a_u32Dst) = u16Tmp; \
6492 } while (0)
6493#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6494 do { \
6495 uint16_t u16Tmp; \
6496 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6497 (a_u64Dst) = u16Tmp; \
6498 } while (0)
6499#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6500 do { \
6501 uint32_t u32Tmp; \
6502 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6503 (a_u64Dst) = u32Tmp; \
6504 } while (0)
6505
6506#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6507 do { \
6508 uint8_t u8Tmp; \
6509 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6510 (a_u16Dst) = (int8_t)u8Tmp; \
6511 } while (0)
6512#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6513 do { \
6514 uint8_t u8Tmp; \
6515 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6516 (a_u32Dst) = (int8_t)u8Tmp; \
6517 } while (0)
6518#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6519 do { \
6520 uint8_t u8Tmp; \
6521 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6522 (a_u64Dst) = (int8_t)u8Tmp; \
6523 } while (0)
6524#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6525 do { \
6526 uint16_t u16Tmp; \
6527 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6528 (a_u32Dst) = (int16_t)u16Tmp; \
6529 } while (0)
6530#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6531 do { \
6532 uint16_t u16Tmp; \
6533 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6534 (a_u64Dst) = (int16_t)u16Tmp; \
6535 } while (0)
6536#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6537 do { \
6538 uint32_t u32Tmp; \
6539 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6540 (a_u64Dst) = (int32_t)u32Tmp; \
6541 } while (0)
6542
6543#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
6544 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
6545#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
6546 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
6547#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
6548 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
6549#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
6550 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
6551
6552#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
6553 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
6554#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
6555 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
6556#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
6557 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
6558#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
6559 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
6560
6561#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
6562#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
6563#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
6564#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
6565#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
6566#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
6567#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
6568 do { \
6569 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
6570 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
6571 } while (0)
6572
6573
6574#define IEM_MC_PUSH_U16(a_u16Value) \
6575 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
6576#define IEM_MC_PUSH_U32(a_u32Value) \
6577 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
6578#define IEM_MC_PUSH_U64(a_u64Value) \
6579 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
6580
6581#define IEM_MC_POP_U16(a_pu16Value) \
6582 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
6583#define IEM_MC_POP_U32(a_pu32Value) \
6584 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
6585#define IEM_MC_POP_U64(a_pu64Value) \
6586 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
6587
6588/** Maps guest memory for direct or bounce buffered access.
6589 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6590 * @remarks May return.
6591 */
6592#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
6593 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6594
6595/** Maps guest memory for direct or bounce buffered access.
6596 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6597 * @remarks May return.
6598 */
6599#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
6600 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6601
6602/** Commits the memory and unmaps the guest memory.
6603 * @remarks May return.
6604 */
6605#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
6606 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
6607
6608/** Commits the memory and unmaps the guest memory, unless the FPU status word
6609 * (@a a_u16FSW) together with the FPU control word indicates a pending unmasked
6610 * exception that would keep the FPU store instruction from storing anything.
6611 *
6612 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
6613 * store, while \#P will not.
6614 *
6615 * @remarks May in theory return - for now.
6616 */
6617#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
6618 do { \
6619 if ( !(a_u16FSW & X86_FSW_ES) \
6620 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
6621 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
6622 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
6623 } while (0)
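/*
 * Illustrative sketch only (not part of this file's decoder tables): roughly how a
 * memory-storing FPU instruction ties the mapping and FPU-store commit micro-ops
 * above together.  IEM_MC_BEGIN/IEM_MC_ARG/IEM_MC_LOCAL/IEM_MC_END and the argument
 * wiring are defined earlier in this file and omitted here; the worker name follows
 * the FSTP pattern in IEMAllInstructions.cpp.h and may differ in detail.
 *
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
 *     IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1);
 *     IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
 *         IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r64, pu16Fsw, pr64Dst, pr80Value);
 *         IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr64Dst, IEM_ACCESS_DATA_W, u16Fsw);
 *         IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
 *     IEM_MC_ELSE()
 *         IEM_MC_IF_FCW_IM()
 *             IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(pr64Dst);
 *             IEM_MC_MEM_COMMIT_AND_UNMAP(pr64Dst, IEM_ACCESS_DATA_W);
 *         IEM_MC_ENDIF();
 *         IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
 *     IEM_MC_ENDIF();
 */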
6624
6625/** Calculate efficient address from R/M. */
6626#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
6627 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
6628
6629#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
6630#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
6631#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
6632#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
6633#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
6634
6635/**
6636 * Defers the rest of the instruction emulation to a C implementation routine
6637 * and returns, only taking the standard parameters.
6638 *
6639 * @param a_pfnCImpl The pointer to the C routine.
6640 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6641 */
6642#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6643
6644/**
6645 * Defers the rest of instruction emulation to a C implementation routine and
6646 * returns, taking one argument in addition to the standard ones.
6647 *
6648 * @param a_pfnCImpl The pointer to the C routine.
6649 * @param a0 The argument.
6650 */
6651#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6652
6653/**
6654 * Defers the rest of the instruction emulation to a C implementation routine
6655 * and returns, taking two arguments in addition to the standard ones.
6656 *
6657 * @param a_pfnCImpl The pointer to the C routine.
6658 * @param a0 The first extra argument.
6659 * @param a1 The second extra argument.
6660 */
6661#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6662
6663/**
6664 * Defers the rest of the instruction emulation to a C implementation routine
6665 * and returns, taking three arguments in addition to the standard ones.
6666 *
6667 * @param a_pfnCImpl The pointer to the C routine.
6668 * @param a0 The first extra argument.
6669 * @param a1 The second extra argument.
6670 * @param a2 The third extra argument.
6671 */
6672#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
6673
6674/**
6675 * Defers the rest of the instruction emulation to a C implementation routine
6676 * and returns, taking five arguments in addition to the standard ones.
6677 *
6678 * @param a_pfnCImpl The pointer to the C routine.
6679 * @param a0 The first extra argument.
6680 * @param a1 The second extra argument.
6681 * @param a2 The third extra argument.
6682 * @param a3 The fourth extra argument.
6683 * @param a4 The fifth extra argument.
6684 */
6685#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
6686
6687/**
6688 * Defers the entire instruction emulation to a C implementation routine and
6689 * returns, only taking the standard parameters.
6690 *
6691 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
6692 *
6693 * @param a_pfnCImpl The pointer to the C routine.
6694 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6695 */
6696#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6697
6698/**
6699 * Defers the entire instruction emulation to a C implementation routine and
6700 * returns, taking one argument in addition to the standard ones.
6701 *
6702 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
6703 *
6704 * @param a_pfnCImpl The pointer to the C routine.
6705 * @param a0 The argument.
6706 */
6707#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6708
6709/**
6710 * Defers the entire instruction emulation to a C implementation routine and
6711 * returns, taking two arguments in addition to the standard ones.
6712 *
6713 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
6714 *
6715 * @param a_pfnCImpl The pointer to the C routine.
6716 * @param a0 The first extra argument.
6717 * @param a1 The second extra argument.
6718 */
6719#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6720
6721/**
6722 * Defers the entire instruction emulation to a C implementation routine and
6723 * returns, taking three arguments in addition to the standard ones.
6724 *
6725 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
6726 *
6727 * @param a_pfnCImpl The pointer to the C routine.
6728 * @param a0 The first extra argument.
6729 * @param a1 The second extra argument.
6730 * @param a2 The third extra argument.
6731 */
6732#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
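/*
 * Illustrative sketch only: an instruction without register/memory micro-ops
 * typically hands everything to a C worker straight from its decoder function
 * via these macros.  The function and worker names below follow the naming used
 * by IEMAllInstructions.cpp.h / IEMAllCImpl.cpp.h and are meant as an example
 * rather than an exact quote:
 *
 *     FNIEMOP_DEF(iemOp_hlt)
 *     {
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *     }
 *
 * An instruction that has already opened an IEM_MC_BEGIN block instead uses the
 * IEM_MC_CALL_CIMPL_* variants, which likewise return from the function.
 */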
6733
6734/**
6735 * Calls a FPU assembly implementation taking one visible argument.
6736 *
6737 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6738 * @param a0 The first extra argument.
6739 */
6740#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
6741 do { \
6742 iemFpuPrepareUsage(pIemCpu); \
6743 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
6744 } while (0)
6745
6746/**
6747 * Calls a FPU assembly implementation taking two visible arguments.
6748 *
6749 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6750 * @param a0 The first extra argument.
6751 * @param a1 The second extra argument.
6752 */
6753#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
6754 do { \
6755 iemFpuPrepareUsage(pIemCpu); \
6756 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
6757 } while (0)
6758
6759/**
6760 * Calls a FPU assembly implementation taking three visible arguments.
6761 *
6762 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6763 * @param a0 The first extra argument.
6764 * @param a1 The second extra argument.
6765 * @param a2 The third extra argument.
6766 */
6767#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
6768 do { \
6769 iemFpuPrepareUsage(pIemCpu); \
6770 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
6771 } while (0)
6772
6773#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
6774 do { \
6775 (a_FpuData).FSW = (a_FSW); \
6776 (a_FpuData).r80Result = *(a_pr80Value); \
6777 } while (0)
6778
6779/** Pushes FPU result onto the stack. */
6780#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
6781 iemFpuPushResult(pIemCpu, &a_FpuData)
6782/** Pushes FPU result onto the stack and sets the FPUDP. */
6783#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
6784 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
6785
6786/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
6787#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
6788 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
6789
6790/** Stores FPU result in a stack register. */
6791#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
6792 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
6793/** Stores FPU result in a stack register and pops the stack. */
6794#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
6795 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
6796/** Stores FPU result in a stack register and sets the FPUDP. */
6797#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6798 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6799/** Stores FPU result in a stack register, sets the FPUDP, and pops the
6800 * stack. */
6801#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6802 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
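/*
 * Illustrative sketch only: a register-only FPU arithmetic instruction such as
 * FADD ST0,STn typically glues the result micro-ops above together like this.
 * The worker name follows the iemAImpl_* FPU worker naming; the IEM_MC_ARG /
 * IEM_MC_LOCAL setup of pFpuRes and the value references is omitted:
 *
 *     IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
 *         IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     IEM_MC_ENDIF();
 *     IEM_MC_USED_FPU();
 */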
6803
6804/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
6805#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
6806 iemFpuUpdateOpcodeAndIp(pIemCpu)
6807/** Free a stack register (for FFREE and FFREEP). */
6808#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
6809 iemFpuStackFree(pIemCpu, a_iStReg)
6810/** Increment the FPU stack pointer. */
6811#define IEM_MC_FPU_STACK_INC_TOP() \
6812 iemFpuStackIncTop(pIemCpu)
6813/** Decrement the FPU stack pointer. */
6814#define IEM_MC_FPU_STACK_DEC_TOP() \
6815 iemFpuStackDecTop(pIemCpu)
6816
6817/** Updates the FSW, FOP, FPUIP, and FPUCS. */
6818#define IEM_MC_UPDATE_FSW(a_u16FSW) \
6819 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6820/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
6821#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
6822 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6823/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
6824#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6825 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6826/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
6827#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
6828 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
6829/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
6830 * stack. */
6831#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6832 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6833/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
6834#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
6835 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
6836
6837/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
6838#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
6839 iemFpuStackUnderflow(pIemCpu, a_iStDst)
6840/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6841 * stack. */
6842#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
6843 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
6844/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6845 * FPUDS. */
6846#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6847 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6848/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6849 * FPUDS. Pops stack. */
6850#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6851 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6852/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6853 * stack twice. */
6854#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
6855 iemFpuStackUnderflowThenPopPop(pIemCpu)
6856/** Raises a FPU stack underflow exception for an instruction pushing a result
6857 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
6858#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
6859 iemFpuStackPushUnderflow(pIemCpu)
6860/** Raises a FPU stack underflow exception for an instruction pushing a result
6861 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
6862#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
6863 iemFpuStackPushUnderflowTwo(pIemCpu)
6864
6865/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6866 * FPUIP, FPUCS and FOP. */
6867#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
6868 iemFpuStackPushOverflow(pIemCpu)
6869/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6870 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
6871#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
6872 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
6873/** Indicates that we (might) have modified the FPU state. */
6874#define IEM_MC_USED_FPU() \
6875 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
6876
6877#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
6878#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
6879#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
6880#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
6881#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
6882 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6883 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6884#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
6885 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6886 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6887#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
6888 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
6889 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6890 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6891#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
6892 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
6893 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6894 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6895#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
6896#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
6897#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
6898#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
6899 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
6900 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6901#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
6902 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
6903 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6904#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
6905 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
6906 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6907#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
6908 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
6909 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6910#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
6911 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
6912 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6913#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
6914 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
6915 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6916#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
6917#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
6918#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
6919 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
6920#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
6921 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
6922#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
6923 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
6924#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
6925 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
6926#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
6927 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
6928#define IEM_MC_IF_FCW_IM() \
6929 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
6930
6931#define IEM_MC_ELSE() } else {
6932#define IEM_MC_ENDIF() } do {} while (0)
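/*
 * Illustrative sketch only: the IEM_MC_IF_* / IEM_MC_ELSE / IEM_MC_ENDIF macros
 * expand to plain C blocks, so a conditional branch like JZ rel8 reads as
 * straight-line code.  IEM_MC_BEGIN/END, IEM_MC_REL_JMP_S8 and
 * IEM_MC_ADVANCE_RIP are defined earlier in this file, and i8Imm stands for the
 * already-fetched immediate:
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 *     IEM_MC_END();
 */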
6933
6934/** @} */
6935
6936
6937/** @name Opcode Debug Helpers.
6938 * @{
6939 */
6940#ifdef DEBUG
6941# define IEMOP_MNEMONIC(a_szMnemonic) \
6942 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
6943 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
6944# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
6945 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
6946 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
6947#else
6948# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
6949# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
6950#endif
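/* With a DEBUG build and level 4 logging enabled, the macros above produce lines
   of the shape (values made up for illustration):
       decode - 0008:00000000801230ab lock cmpxchg Eb,Gb [#1234]
   i.e. CS:RIP, an optional "lock " prefix, the mnemonic (and operand string for
   IEMOP_MNEMONIC2), and the instruction count. */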
6951
6952/** @} */
6953
6954
6955/** @name Opcode Helpers.
6956 * @{
6957 */
6958
6959/** The instruction raises an \#UD in real and V8086 mode. */
6960#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
6961 do \
6962 { \
6963 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
6964 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
6965 } while (0)
6966
6967/** The instruction allows no lock prefixing (in this encoding); raises \#UD if
6968 * lock prefixed.
6969 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
6970#define IEMOP_HLP_NO_LOCK_PREFIX() \
6971 do \
6972 { \
6973 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
6974 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
6975 } while (0)
6976
6977/** The instruction is not available in 64-bit mode; raises \#UD if we're in
6978 * 64-bit mode. */
6979#define IEMOP_HLP_NO_64BIT() \
6980 do \
6981 { \
6982 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
6983 return IEMOP_RAISE_INVALID_OPCODE(); \
6984 } while (0)
6985
6986/** The instruction defaults to 64-bit operand size if 64-bit mode. */
6987#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
6988 do \
6989 { \
6990 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
6991 iemRecalEffOpSize64Default(pIemCpu); \
6992 } while (0)
6993
6994/** The instruction has 64-bit operand size if 64-bit mode. */
6995#define IEMOP_HLP_64BIT_OP_SIZE() \
6996 do \
6997 { \
6998 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
6999 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
7000 } while (0)
7001
7002/**
7003 * Done decoding.
7004 */
7005#define IEMOP_HLP_DONE_DECODING() \
7006 do \
7007 { \
7008 /*nothing for now, maybe later... */ \
7009 } while (0)
7010
7011/**
7012 * Done decoding, raise \#UD exception if lock prefix present.
7013 */
7014#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
7015 do \
7016 { \
7017 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7018 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7019 } while (0)
7020
7021
7022/**
7023 * Calculates the effective address of a ModR/M memory operand.
7024 *
7025 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7026 *
7027 * @return Strict VBox status code.
7028 * @param pIemCpu The IEM per CPU data.
7029 * @param bRm The ModRM byte.
7030 * @param pGCPtrEff Where to return the effective address.
7031 */
7032static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
7033{
7034 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
7035 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7036#define SET_SS_DEF() \
7037 do \
7038 { \
7039 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
7040 pIemCpu->iEffSeg = X86_SREG_SS; \
7041 } while (0)
7042
7043/** @todo Check the effective address size crap! */
7044 switch (pIemCpu->enmEffAddrMode)
7045 {
7046 case IEMMODE_16BIT:
7047 {
7048 uint16_t u16EffAddr;
7049
7050 /* Handle the disp16 form with no registers first. */
7051 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
7052 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
7053 else
7054 {
7055 /* Get the displacement. */
7056 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7057 {
7058 case 0: u16EffAddr = 0; break;
7059 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
7060 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
7061 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7062 }
7063
7064 /* Add the base and index registers to the disp. */
7065 switch (bRm & X86_MODRM_RM_MASK)
7066 {
7067 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
7068 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
7069 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
7070 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
7071 case 4: u16EffAddr += pCtx->si; break;
7072 case 5: u16EffAddr += pCtx->di; break;
7073 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
7074 case 7: u16EffAddr += pCtx->bx; break;
7075 }
7076 }
7077
7078 *pGCPtrEff = u16EffAddr;
7079 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
7080 return VINF_SUCCESS;
7081 }
7082
7083 case IEMMODE_32BIT:
7084 {
7085 uint32_t u32EffAddr;
7086
7087 /* Handle the disp32 form with no registers first. */
7088 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7089 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
7090 else
7091 {
7092 /* Get the register (or SIB) value. */
7093 switch ((bRm & X86_MODRM_RM_MASK))
7094 {
7095 case 0: u32EffAddr = pCtx->eax; break;
7096 case 1: u32EffAddr = pCtx->ecx; break;
7097 case 2: u32EffAddr = pCtx->edx; break;
7098 case 3: u32EffAddr = pCtx->ebx; break;
7099 case 4: /* SIB */
7100 {
7101 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7102
7103 /* Get the index and scale it. */
7104 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
7105 {
7106 case 0: u32EffAddr = pCtx->eax; break;
7107 case 1: u32EffAddr = pCtx->ecx; break;
7108 case 2: u32EffAddr = pCtx->edx; break;
7109 case 3: u32EffAddr = pCtx->ebx; break;
7110 case 4: u32EffAddr = 0; /*none */ break;
7111 case 5: u32EffAddr = pCtx->ebp; break;
7112 case 6: u32EffAddr = pCtx->esi; break;
7113 case 7: u32EffAddr = pCtx->edi; break;
7114 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7115 }
7116 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7117
7118 /* add base */
7119 switch (bSib & X86_SIB_BASE_MASK)
7120 {
7121 case 0: u32EffAddr += pCtx->eax; break;
7122 case 1: u32EffAddr += pCtx->ecx; break;
7123 case 2: u32EffAddr += pCtx->edx; break;
7124 case 3: u32EffAddr += pCtx->ebx; break;
7125 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
7126 case 5:
7127 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7128 {
7129 u32EffAddr += pCtx->ebp;
7130 SET_SS_DEF();
7131 }
7132 else
7133 {
7134 uint32_t u32Disp;
7135 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7136 u32EffAddr += u32Disp;
7137 }
7138 break;
7139 case 6: u32EffAddr += pCtx->esi; break;
7140 case 7: u32EffAddr += pCtx->edi; break;
7141 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7142 }
7143 break;
7144 }
7145 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
7146 case 6: u32EffAddr = pCtx->esi; break;
7147 case 7: u32EffAddr = pCtx->edi; break;
7148 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7149 }
7150
7151 /* Get and add the displacement. */
7152 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7153 {
7154 case 0:
7155 break;
7156 case 1:
7157 {
7158 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7159 u32EffAddr += i8Disp;
7160 break;
7161 }
7162 case 2:
7163 {
7164 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7165 u32EffAddr += u32Disp;
7166 break;
7167 }
7168 default:
7169 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7170 }
7171
7172 }
7173 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
7174 *pGCPtrEff = u32EffAddr;
7175 else
7176 {
7177 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
7178 *pGCPtrEff = u32EffAddr & UINT16_MAX;
7179 }
7180 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
7181 return VINF_SUCCESS;
7182 }
7183
7184 case IEMMODE_64BIT:
7185 {
7186 uint64_t u64EffAddr;
7187
7188 /* Handle the rip+disp32 form with no registers first. */
7189 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7190 {
7191 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
7192 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
7193 }
7194 else
7195 {
7196 /* Get the register (or SIB) value. */
7197 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
7198 {
7199 case 0: u64EffAddr = pCtx->rax; break;
7200 case 1: u64EffAddr = pCtx->rcx; break;
7201 case 2: u64EffAddr = pCtx->rdx; break;
7202 case 3: u64EffAddr = pCtx->rbx; break;
7203 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
7204 case 6: u64EffAddr = pCtx->rsi; break;
7205 case 7: u64EffAddr = pCtx->rdi; break;
7206 case 8: u64EffAddr = pCtx->r8; break;
7207 case 9: u64EffAddr = pCtx->r9; break;
7208 case 10: u64EffAddr = pCtx->r10; break;
7209 case 11: u64EffAddr = pCtx->r11; break;
7210 case 13: u64EffAddr = pCtx->r13; break;
7211 case 14: u64EffAddr = pCtx->r14; break;
7212 case 15: u64EffAddr = pCtx->r15; break;
7213 /* SIB */
7214 case 4:
7215 case 12:
7216 {
7217 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7218
7219 /* Get the index and scale it. */
7220 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
7221 {
7222 case 0: u64EffAddr = pCtx->rax; break;
7223 case 1: u64EffAddr = pCtx->rcx; break;
7224 case 2: u64EffAddr = pCtx->rdx; break;
7225 case 3: u64EffAddr = pCtx->rbx; break;
7226 case 4: u64EffAddr = 0; /*none */ break;
7227 case 5: u64EffAddr = pCtx->rbp; break;
7228 case 6: u64EffAddr = pCtx->rsi; break;
7229 case 7: u64EffAddr = pCtx->rdi; break;
7230 case 8: u64EffAddr = pCtx->r8; break;
7231 case 9: u64EffAddr = pCtx->r9; break;
7232 case 10: u64EffAddr = pCtx->r10; break;
7233 case 11: u64EffAddr = pCtx->r11; break;
7234 case 12: u64EffAddr = pCtx->r12; break;
7235 case 13: u64EffAddr = pCtx->r13; break;
7236 case 14: u64EffAddr = pCtx->r14; break;
7237 case 15: u64EffAddr = pCtx->r15; break;
7238 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7239 }
7240 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7241
7242 /* add base */
7243 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
7244 {
7245 case 0: u64EffAddr += pCtx->rax; break;
7246 case 1: u64EffAddr += pCtx->rcx; break;
7247 case 2: u64EffAddr += pCtx->rdx; break;
7248 case 3: u64EffAddr += pCtx->rbx; break;
7249 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
7250 case 6: u64EffAddr += pCtx->rsi; break;
7251 case 7: u64EffAddr += pCtx->rdi; break;
7252 case 8: u64EffAddr += pCtx->r8; break;
7253 case 9: u64EffAddr += pCtx->r9; break;
7254 case 10: u64EffAddr += pCtx->r10; break;
7255 case 11: u64EffAddr += pCtx->r11; break;
      case 12: u64EffAddr += pCtx->r12; break;
7256 case 14: u64EffAddr += pCtx->r14; break;
7257 case 15: u64EffAddr += pCtx->r15; break;
7258 /* complicated encodings */
7259 case 5:
7260 case 13:
7261 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7262 {
7263 if (!pIemCpu->uRexB)
7264 {
7265 u64EffAddr += pCtx->rbp;
7266 SET_SS_DEF();
7267 }
7268 else
7269 u64EffAddr += pCtx->r13;
7270 }
7271 else
7272 {
7273 uint32_t u32Disp;
7274 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7275 u64EffAddr += (int32_t)u32Disp;
7276 }
7277 break;
7278 }
7279 break;
7280 }
7281 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7282 }
7283
7284 /* Get and add the displacement. */
7285 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7286 {
7287 case 0:
7288 break;
7289 case 1:
7290 {
7291 int8_t i8Disp;
7292 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7293 u64EffAddr += i8Disp;
7294 break;
7295 }
7296 case 2:
7297 {
7298 uint32_t u32Disp;
7299 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7300 u64EffAddr += (int32_t)u32Disp;
7301 break;
7302 }
7303 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
7304 }
7305
7306 }
7307 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
7308 *pGCPtrEff = u64EffAddr;
7309 else
7310 *pGCPtrEff = u64EffAddr & UINT32_MAX;
7311 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
7312 return VINF_SUCCESS;
7313 }
7314 }
7315
7316 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
7317}
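/*
 * A few worked examples of the decoding above (illustrative, derived directly
 * from the switch statements rather than quoted from elsewhere):
 *  - 16-bit, bRm=0x46 (mod=1, rm=6): [bp + disp8]; SS becomes the default
 *    segment via SET_SS_DEF().
 *  - 32-bit, bRm=0x44 (mod=1, rm=4=SIB), bSib=0x98 (scale=2, index=3, base=0):
 *    [eax + ebx*4 + disp8].
 *  - 64-bit, bRm=0x05 (mod=0, rm=5): RIP-relative, disp32 added to the address
 *    of the next instruction (approximated here with rip + offOpcode).
 */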
7318
7319/** @} */
7320
7321
7322
7323/*
7324 * Include the instructions
7325 */
7326#include "IEMAllInstructions.cpp.h"
7327
7328
7329
7330
7331#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
7332
7333/**
7334 * Sets up execution verification mode.
7335 */
7336static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
7337{
7338 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7339 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
7340
7341 /*
7342 * Always note down the address of the current instruction.
7343 */
7344 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
7345 pIemCpu->uOldRip = pOrgCtx->rip;
7346
7347 /*
7348 * Enable verification and/or logging.
7349 */
7350 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
7351 if ( pIemCpu->fNoRem
7352 && ( 0
7353#if 0 /* auto enable on first paged protected mode interrupt */
7354 || ( pOrgCtx->eflags.Bits.u1IF
7355 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
7356 && TRPMHasTrap(pVCpu)
7357 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
7358#endif
7359#if 0
7360 || ( pOrgCtx->cs.Sel == 0x10
7361 && ( pOrgCtx->rip == 0x90119e3e
7362 || pOrgCtx->rip == 0x901d9810))
7363#endif
7364#if 0 /* Auto enable DSL - FPU stuff. */
7365 || ( pOrgCtx->cs.Sel == 0x10
7366 && (// pOrgCtx->rip == 0xc02ec07f
7367 //|| pOrgCtx->rip == 0xc02ec082
7368 //|| pOrgCtx->rip == 0xc02ec0c9
7369 0
7370 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
7371#endif
7372#if 0 /* Auto enable DSL - fstp st0 stuff. */
7373 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
7374#endif
7375#if 0
7376 || pOrgCtx->rip == 0x9022bb3a
7377#endif
7378#if 0
7379 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
7380#endif
7381#if 0
7382 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
7383 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
7384#endif
7385#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
7386 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
7387 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
7388 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
7389#endif
7390#if 0 /* NT4SP1 - xadd early boot. */
7391 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
7392#endif
7393#if 0 /* NT4SP1 - wrmsr (intel MSR). */
7394 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
7395#endif
7396#if 0 /* NT4SP1 - cmpxchg (AMD). */
7397 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
7398#endif
7399#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
7400 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
7401#endif
7402#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
7403 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
7404
7405#endif
7406#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
7407 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
7408
7409#endif
7410#if 0 /* NT4SP1 - frstor [ecx] */
7411 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
7412#endif
7413 )
7414 )
7415 {
7416 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
7417 RTLogFlags(NULL, "enabled");
7418 pIemCpu->fNoRem = false;
7419 }
7420
7421 /*
7422 * Switch state.
7423 */
7424 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7425 {
7426 static CPUMCTX s_DebugCtx; /* Ugly! */
7427
7428 s_DebugCtx = *pOrgCtx;
7429 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
7430 }
7431
7432 /*
7433 * See if there is an interrupt pending in TRPM and inject it if we can.
7434 */
7435 pIemCpu->uInjectCpl = UINT8_MAX;
7436 if ( pOrgCtx->eflags.Bits.u1IF
7437 && TRPMHasTrap(pVCpu)
7438 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
7439 {
7440 uint8_t u8TrapNo;
7441 TRPMEVENT enmType;
7442 RTGCUINT uErrCode;
7443 RTGCPTR uCr2;
7444 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2); AssertRC(rc2);
7445 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
7446 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7447 TRPMResetTrap(pVCpu);
7448 pIemCpu->uInjectCpl = pIemCpu->uCpl;
7449 }
7450
7451 /*
7452 * Reset the counters.
7453 */
7454 pIemCpu->cIOReads = 0;
7455 pIemCpu->cIOWrites = 0;
7456 pIemCpu->fIgnoreRaxRdx = false;
7457 pIemCpu->fOverlappingMovs = false;
7458 pIemCpu->fUndefinedEFlags = 0;
7459
7460 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7461 {
7462 /*
7463 * Free all verification records.
7464 */
7465 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
7466 pIemCpu->pIemEvtRecHead = NULL;
7467 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
7468 do
7469 {
7470 while (pEvtRec)
7471 {
7472 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
7473 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
7474 pIemCpu->pFreeEvtRec = pEvtRec;
7475 pEvtRec = pNext;
7476 }
7477 pEvtRec = pIemCpu->pOtherEvtRecHead;
7478 pIemCpu->pOtherEvtRecHead = NULL;
7479 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
7480 } while (pEvtRec);
7481 }
7482}
7483
7484
7485/**
7486 * Allocate an event record.
7487 * @returns Pointer to a record.
7488 */
7489static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
7490{
7491 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7492 return NULL;
7493
7494 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
7495 if (pEvtRec)
7496 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
7497 else
7498 {
7499 if (!pIemCpu->ppIemEvtRecNext)
7500 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
7501
7502 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
7503 if (!pEvtRec)
7504 return NULL;
7505 }
7506 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
7507 pEvtRec->pNext = NULL;
7508 return pEvtRec;
7509}
7510
7511
7512/**
7513 * IOMMMIORead notification.
7514 */
7515VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
7516{
7517 PVMCPU pVCpu = VMMGetCpu(pVM);
7518 if (!pVCpu)
7519 return;
7520 PIEMCPU pIemCpu = &pVCpu->iem.s;
7521 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7522 if (!pEvtRec)
7523 return;
7524 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7525 pEvtRec->u.RamRead.GCPhys = GCPhys;
7526 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
7527 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7528 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7529}
7530
7531
7532/**
7533 * IOMMMIOWrite notification.
7534 */
7535VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
7536{
7537 PVMCPU pVCpu = VMMGetCpu(pVM);
7538 if (!pVCpu)
7539 return;
7540 PIEMCPU pIemCpu = &pVCpu->iem.s;
7541 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7542 if (!pEvtRec)
7543 return;
7544 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7545 pEvtRec->u.RamWrite.GCPhys = GCPhys;
7546 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
7547 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
7548 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
7549 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
7550 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
7551 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7552 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7553}
7554
7555
7556/**
7557 * IOMIOPortRead notification.
7558 */
7559VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
7560{
7561 PVMCPU pVCpu = VMMGetCpu(pVM);
7562 if (!pVCpu)
7563 return;
7564 PIEMCPU pIemCpu = &pVCpu->iem.s;
7565 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7566 if (!pEvtRec)
7567 return;
7568 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7569 pEvtRec->u.IOPortRead.Port = Port;
7570 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7571 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7572 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7573}
7574
7575/**
7576 * IOMIOPortWrite notification.
7577 */
7578VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7579{
7580 PVMCPU pVCpu = VMMGetCpu(pVM);
7581 if (!pVCpu)
7582 return;
7583 PIEMCPU pIemCpu = &pVCpu->iem.s;
7584 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7585 if (!pEvtRec)
7586 return;
7587 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7588 pEvtRec->u.IOPortWrite.Port = Port;
7589 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7590 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7591 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7592 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7593}
7594
7595
7596VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
7597{
7598 AssertFailed();
7599}
7600
7601
7602VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
7603{
7604 AssertFailed();
7605}
7606
7607
7608/**
7609 * Fakes and records an I/O port read.
7610 *
7611 * @returns VINF_SUCCESS.
7612 * @param pIemCpu The IEM per CPU data.
7613 * @param Port The I/O port.
7614 * @param pu32Value Where to store the fake value.
7615 * @param cbValue The size of the access.
7616 */
7617static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
7618{
7619 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7620 if (pEvtRec)
7621 {
7622 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7623 pEvtRec->u.IOPortRead.Port = Port;
7624 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7625 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7626 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7627 }
7628 pIemCpu->cIOReads++;
7629 *pu32Value = 0xcccccccc;
7630 return VINF_SUCCESS;
7631}
7632
7633
7634/**
7635 * Fakes and records an I/O port write.
7636 *
7637 * @returns VINF_SUCCESS.
7638 * @param pIemCpu The IEM per CPU data.
7639 * @param Port The I/O port.
7640 * @param u32Value The value being written.
7641 * @param cbValue The size of the access.
7642 */
7643static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7644{
7645 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7646 if (pEvtRec)
7647 {
7648 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7649 pEvtRec->u.IOPortWrite.Port = Port;
7650 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7651 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7652 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7653 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7654 }
7655 pIemCpu->cIOWrites++;
7656 return VINF_SUCCESS;
7657}
7658
7659
7660/**
7661 * Used to add extra details about a stub case.
7662 * @param pIemCpu The IEM per CPU state.
7663 */
7664static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
7665{
7666 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7667 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7668 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7669 char szRegs[4096];
7670 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
7671 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
7672 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
7673 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
7674 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
7675 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
7676 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
7677 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
7678 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
7679 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
7680 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
7681 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
7682 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
7683 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
7684 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
7685 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
7686 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
7687 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
7688 " efer=%016VR{efer}\n"
7689 " pat=%016VR{pat}\n"
7690 " sf_mask=%016VR{sf_mask}\n"
7691 "krnl_gs_base=%016VR{krnl_gs_base}\n"
7692 " lstar=%016VR{lstar}\n"
7693 " star=%016VR{star} cstar=%016VR{cstar}\n"
7694 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
7695 );
7696
7697 char szInstr1[256];
7698 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
7699 DBGF_DISAS_FLAGS_DEFAULT_MODE,
7700 szInstr1, sizeof(szInstr1), NULL);
7701 char szInstr2[256];
7702 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
7703 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
7704 szInstr2, sizeof(szInstr2), NULL);
7705
7706 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
7707}
7708
7709
7710/**
7711 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
7712 * dump to the assertion info.
7713 *
7714 * @param pEvtRec The record to dump.
7715 */
7716static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
7717{
7718 switch (pEvtRec->enmEvent)
7719 {
7720 case IEMVERIFYEVENT_IOPORT_READ:
7721 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
7722 pEvtRec->u.IOPortRead.Port,
7723 pEvtRec->u.IOPortRead.cbValue);
7724 break;
7725 case IEMVERIFYEVENT_IOPORT_WRITE:
7726 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
7727 pEvtRec->u.IOPortWrite.Port,
7728 pEvtRec->u.IOPortWrite.cbValue,
7729 pEvtRec->u.IOPortWrite.u32Value);
7730 break;
7731 case IEMVERIFYEVENT_RAM_READ:
7732 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
7733 pEvtRec->u.RamRead.GCPhys,
7734 pEvtRec->u.RamRead.cb);
7735 break;
7736 case IEMVERIFYEVENT_RAM_WRITE:
7737 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
7738 pEvtRec->u.RamWrite.GCPhys,
7739 pEvtRec->u.RamWrite.cb,
7740 (int)pEvtRec->u.RamWrite.cb,
7741 pEvtRec->u.RamWrite.ab);
7742 break;
7743 default:
7744 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
7745 break;
7746 }
7747}
7748
7749
7750/**
7751 * Raises an assertion on the specified records, showing the given message with
7752 * the record dumps attached.
7753 *
7754 * @param pIemCpu The IEM per CPU data.
7755 * @param pEvtRec1 The first record.
7756 * @param pEvtRec2 The second record.
7757 * @param pszMsg The message explaining why we're asserting.
7758 */
7759static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
7760{
7761 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7762 iemVerifyAssertAddRecordDump(pEvtRec1);
7763 iemVerifyAssertAddRecordDump(pEvtRec2);
7764 iemVerifyAssertMsg2(pIemCpu);
7765 RTAssertPanic();
7766}
7767
7768
7769/**
7770 * Raises an assertion on the specified record, showing the given message with
7771 * a record dump attached.
7772 *
7773 * @param pIemCpu The IEM per CPU data.
7774 * @param pEvtRec The record.
7775 * @param pszMsg The message explaining why we're asserting.
7776 */
7777static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
7778{
7779 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7780 iemVerifyAssertAddRecordDump(pEvtRec);
7781 iemVerifyAssertMsg2(pIemCpu);
7782 RTAssertPanic();
7783}
7784
7785
7786/**
7787 * Verifies a write record.
7788 *
7789 * @param pIemCpu The IEM per CPU data.
7790 * @param pEvtRec The write record.
7791 */
7792static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
7793{
7794 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
7795 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
7796 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
7797 if ( RT_FAILURE(rc)
7798 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
7799 {
7800 /* fend off ins */
7801 if ( !pIemCpu->cIOReads
7802 || pEvtRec->u.RamWrite.ab[0] != 0xcc
7803 || ( pEvtRec->u.RamWrite.cb != 1
7804 && pEvtRec->u.RamWrite.cb != 2
7805 && pEvtRec->u.RamWrite.cb != 4) )
7806 {
7807 /* fend off ROMs */
7808 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
7809 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
7810 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
7811 {
7812 /* fend off fxsave */
7813 if (pEvtRec->u.RamWrite.cb != 512)
7814 {
7815 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7816 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
7817 RTAssertMsg2Add("REM: %.*Rhxs\n"
7818 "IEM: %.*Rhxs\n",
7819 pEvtRec->u.RamWrite.cb, abBuf,
7820 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
7821 iemVerifyAssertAddRecordDump(pEvtRec);
7822 iemVerifyAssertMsg2(pIemCpu);
7823 RTAssertPanic();
7824 }
7825 }
7826 }
7827 }
7828
7829}
7830
7831/**
7832 * Performs the post-execution verification checks.
7833 */
7834static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
7835{
7836 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7837 return;
7838
7839 /*
7840 * Switch back the state.
7841 */
7842 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
7843 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
7844 Assert(pOrgCtx != pDebugCtx);
7845 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
7846
7847 /*
7848 * Execute the instruction in REM.
7849 */
7850 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7851 EMRemLock(pVM);
7852 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
7853 AssertRC(rc);
7854 EMRemUnlock(pVM);
7855
7856 /*
7857 * Compare the register states.
7858 */
7859 unsigned cDiffs = 0;
7860 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
7861 {
7862 //Log(("REM and IEM ends up with different registers!\n"));
7863
7864# define CHECK_FIELD(a_Field) \
7865 do \
7866 { \
7867 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
7868 { \
7869 switch (sizeof(pOrgCtx->a_Field)) \
7870 { \
7871 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7872 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7873 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7874 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7875 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
7876 } \
7877 cDiffs++; \
7878 } \
7879 } while (0)
7880
7881# define CHECK_BIT_FIELD(a_Field) \
7882 do \
7883 { \
7884 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
7885 { \
7886 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
7887 cDiffs++; \
7888 } \
7889 } while (0)
7890
7891# define CHECK_SEL(a_Sel) \
7892 do \
7893 { \
7894 CHECK_FIELD(a_Sel.Sel); \
7895 CHECK_FIELD(a_Sel.Attr.u); \
7896 CHECK_FIELD(a_Sel.u64Base); \
7897 CHECK_FIELD(a_Sel.u32Limit); \
7898 CHECK_FIELD(a_Sel.fFlags); \
7899 } while (0)
7900
7901#if 1 /* The recompiler doesn't update these the intel way. */
7902 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
7903 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
7904 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
7905 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
7906 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
7907 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
7908 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
7909 pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
7910 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
7911 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
7912#endif
7913 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
7914 {
7915 RTAssertMsg2Weak(" the FPU state differs\n");
7916 cDiffs++;
7917 CHECK_FIELD(fpu.FCW);
7918 CHECK_FIELD(fpu.FSW);
7919 CHECK_FIELD(fpu.FTW);
7920 CHECK_FIELD(fpu.FOP);
7921 CHECK_FIELD(fpu.FPUIP);
7922 CHECK_FIELD(fpu.CS);
7923 CHECK_FIELD(fpu.Rsrvd1);
7924 CHECK_FIELD(fpu.FPUDP);
7925 CHECK_FIELD(fpu.DS);
7926 CHECK_FIELD(fpu.Rsrvd2);
7927 CHECK_FIELD(fpu.MXCSR);
7928 CHECK_FIELD(fpu.MXCSR_MASK);
7929 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
7930 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
7931 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
7932 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
7933 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
7934 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
7935 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
7936 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
7937 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
7938 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
7939 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
7940 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
7941 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
7942 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
7943 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
7944 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
7945 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
7946 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
7947 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
7948 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
7949 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
7950 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
7951 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
7952 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
7953 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
7954 CHECK_FIELD(fpu.au32RsrvdRest[i]);
7955 }
7956 CHECK_FIELD(rip);
7957 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
7958 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
7959 {
7960 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
7961 CHECK_BIT_FIELD(rflags.Bits.u1CF);
7962 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
7963 CHECK_BIT_FIELD(rflags.Bits.u1PF);
7964 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
7965 CHECK_BIT_FIELD(rflags.Bits.u1AF);
7966 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
7967 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
7968 CHECK_BIT_FIELD(rflags.Bits.u1SF);
7969 CHECK_BIT_FIELD(rflags.Bits.u1TF);
7970 CHECK_BIT_FIELD(rflags.Bits.u1IF);
7971 CHECK_BIT_FIELD(rflags.Bits.u1DF);
7972 CHECK_BIT_FIELD(rflags.Bits.u1OF);
7973 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
7974 CHECK_BIT_FIELD(rflags.Bits.u1NT);
7975 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
7976 CHECK_BIT_FIELD(rflags.Bits.u1RF);
7977 CHECK_BIT_FIELD(rflags.Bits.u1VM);
7978 CHECK_BIT_FIELD(rflags.Bits.u1AC);
7979 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
7980 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
7981 CHECK_BIT_FIELD(rflags.Bits.u1ID);
7982 }
7983
7984 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
7985 CHECK_FIELD(rax);
7986 CHECK_FIELD(rcx);
7987 if (!pIemCpu->fIgnoreRaxRdx)
7988 CHECK_FIELD(rdx);
7989 CHECK_FIELD(rbx);
7990 CHECK_FIELD(rsp);
7991 CHECK_FIELD(rbp);
7992 CHECK_FIELD(rsi);
7993 CHECK_FIELD(rdi);
7994 CHECK_FIELD(r8);
7995 CHECK_FIELD(r9);
7996 CHECK_FIELD(r10);
7997 CHECK_FIELD(r11);
7998 CHECK_FIELD(r12);
7999 CHECK_FIELD(r13);
 CHECK_FIELD(r14);
 CHECK_FIELD(r15);
8000 CHECK_SEL(cs);
8001 CHECK_SEL(ss);
8002 CHECK_SEL(ds);
8003 CHECK_SEL(es);
8004 CHECK_SEL(fs);
8005 CHECK_SEL(gs);
8006 CHECK_FIELD(cr0);
8007 CHECK_FIELD(cr2);
8008 CHECK_FIELD(cr3);
8009 CHECK_FIELD(cr4);
8010 CHECK_FIELD(dr[0]);
8011 CHECK_FIELD(dr[1]);
8012 CHECK_FIELD(dr[2]);
8013 CHECK_FIELD(dr[3]);
8014 CHECK_FIELD(dr[6]);
8015 if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
8016 CHECK_FIELD(dr[7]);
8017 CHECK_FIELD(gdtr.cbGdt);
8018 CHECK_FIELD(gdtr.pGdt);
8019 CHECK_FIELD(idtr.cbIdt);
8020 CHECK_FIELD(idtr.pIdt);
8021 CHECK_SEL(ldtr);
8022 CHECK_SEL(tr);
8023 CHECK_FIELD(SysEnter.cs);
8024 CHECK_FIELD(SysEnter.eip);
8025 CHECK_FIELD(SysEnter.esp);
8026 CHECK_FIELD(msrEFER);
8027 CHECK_FIELD(msrSTAR);
8028 CHECK_FIELD(msrPAT);
8029 CHECK_FIELD(msrLSTAR);
8030 CHECK_FIELD(msrCSTAR);
8031 CHECK_FIELD(msrSFMASK);
8032 CHECK_FIELD(msrKERNELGSBASE);
8033
8034 if (cDiffs != 0)
8035 {
8036 DBGFR3Info(pVM, "cpumguest", "verbose", NULL);
8037 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
8038 iemVerifyAssertMsg2(pIemCpu);
8039 RTAssertPanic();
8040 }
8041# undef CHECK_FIELD
8042# undef CHECK_BIT_FIELD
8043 }
8044
8045 /*
8046 * If the register state compared fine, check the verification event
8047 * records.
8048 */
8049 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
8050 {
8051 /*
8052 * Compare verification event records.
8053 * - I/O port accesses should be a 1:1 match.
8054 */
8055 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
8056 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
8057 while (pIemRec && pOtherRec)
8058 {
8059 /* Since we might miss RAM writes and reads, ignore reads and check
8060 that any memory written by IEM matches the actual guest RAM, then skip these extra records. */
8061 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
8062 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
8063 && pIemRec->pNext)
8064 {
8065 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8066 iemVerifyWriteRecord(pIemCpu, pIemRec);
8067 pIemRec = pIemRec->pNext;
8068 }
8069
8070 /* Do the compare. */
8071 if (pIemRec->enmEvent != pOtherRec->enmEvent)
8072 {
8073 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
8074 break;
8075 }
8076 bool fEquals;
8077 switch (pIemRec->enmEvent)
8078 {
8079 case IEMVERIFYEVENT_IOPORT_READ:
8080 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
8081 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
8082 break;
8083 case IEMVERIFYEVENT_IOPORT_WRITE:
8084 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
8085 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
8086 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
8087 break;
8088 case IEMVERIFYEVENT_RAM_READ:
8089 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
8090 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
8091 break;
8092 case IEMVERIFYEVENT_RAM_WRITE:
8093 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
8094 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
8095 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
8096 break;
8097 default:
8098 fEquals = false;
8099 break;
8100 }
8101 if (!fEquals)
8102 {
8103 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
8104 break;
8105 }
8106
8107 /* advance */
8108 pIemRec = pIemRec->pNext;
8109 pOtherRec = pOtherRec->pNext;
8110 }
8111
8112 /* Ignore extra writes and reads. */
8113 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
8114 {
8115 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8116 iemVerifyWriteRecord(pIemCpu, pIemRec);
8117 pIemRec = pIemRec->pNext;
8118 }
8119 if (pIemRec != NULL)
8120 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
8121 else if (pOtherRec != NULL)
8122 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
8123 }
8124 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
8125}
8126
8127#else /* !IEM_VERIFICATION_MODE || !IN_RING3 */
8128
8129/* stubs */
8130static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8131{
8132 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
8133 return VERR_INTERNAL_ERROR;
8134}
8135
8136static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8137{
8138 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
8139 return VERR_INTERNAL_ERROR;
8140}
8141
8142#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */
8143
8144
8145/**
8146 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
8147 * IEMExecOneWithPrefetchedByPC.
8148 *
8149 * @return Strict VBox status code.
8150 * @param pVCpu The current virtual CPU.
8151 * @param pIemCpu The IEM per CPU data.
8152 */
8153DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu)
8154{
8155 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
8156 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8157 if (rcStrict == VINF_SUCCESS)
8158 pIemCpu->cInstructions++;
8159//#ifdef DEBUG
8160// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
8161//#endif
8162
8163 /* Execute the next instruction as well if a cli, pop ss or
8164 mov ss, Gr has just completed successfully. */
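 /* Note: the inhibit-interrupts force-flag only applies to the instruction
 immediately following the inhibiting one, so a second instruction is decoded
 and executed here while EMGetInhibitInterruptsPC() still matches RIP; the
 bogus PC written below then keeps the flag from matching anything again. */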
8165 if ( rcStrict == VINF_SUCCESS
8166 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
8167 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
8168 {
8169 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
8170 if (rcStrict == VINF_SUCCESS)
8171 {
8172 b; IEM_OPCODE_GET_NEXT_U8(&b);
8173 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8174 if (rcStrict == VINF_SUCCESS)
8175 pIemCpu->cInstructions++;
8176 }
8177 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
8178 }
8179
8180 /*
8181 * Return value fiddling and statistics.
8182 */
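 /* Note: the VINF_EM_* statuses are ordered so that lower values are more
 important; a pending pass-up status therefore replaces rcStrict when it is
 outside the EM range or numerically lower (i.e. higher priority). */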
8183 if (rcStrict != VINF_SUCCESS)
8184 {
8185 if (RT_SUCCESS(rcStrict))
8186 {
8187 AssertMsg(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8188 int32_t const rcPassUp = pIemCpu->rcPassUp;
8189 if (rcPassUp == VINF_SUCCESS)
8190 pIemCpu->cRetInfStatuses++;
8191 else if ( rcPassUp < VINF_EM_FIRST
8192 || rcPassUp > VINF_EM_LAST
8193 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
8194 {
8195 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8196 pIemCpu->cRetPassUpStatus++;
8197 rcStrict = rcPassUp;
8198 }
8199 else
8200 {
8201 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8202 pIemCpu->cRetInfStatuses++;
8203 }
8204 }
8205 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
8206 pIemCpu->cRetAspectNotImplemented++;
8207 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
8208 pIemCpu->cRetInstrNotImplemented++;
8209#ifdef IEM_VERIFICATION_MODE
8210 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
8211 rcStrict = VINF_SUCCESS;
8212#endif
8213 else
8214 pIemCpu->cRetErrStatuses++;
8215 }
8216 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
8217 {
8218 pIemCpu->cRetPassUpStatus++;
8219 rcStrict = pIemCpu->rcPassUp;
8220 }
8221
8222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
8223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
8224#if defined(IEM_VERIFICATION_MODE)
8225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
8226 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
8227 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
8228 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
8229#endif
8230 return rcStrict;
8231}
8232
8233
8234/**
8235 * Execute one instruction.
8236 *
8237 * @return Strict VBox status code.
8238 * @param pVCpu The current virtual CPU.
8239 */
8240VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
8241{
8242 PIEMCPU pIemCpu = &pVCpu->iem.s;
8243
8244#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
8245 iemExecVerificationModeSetup(pIemCpu);
8246#endif
8247#ifdef LOG_ENABLED
8248 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8249# ifdef IN_RING3
8250 if (LogIs2Enabled())
8251 {
8252 char szInstr[256];
8253 uint32_t cbInstr = 0;
8254 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
8255 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8256 szInstr, sizeof(szInstr), &cbInstr);
8257
8258 Log3(("**** "
8259 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8260 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
8261 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8262 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8263 " %s\n"
8264 ,
8265 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
8266 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
8267 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
8268 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
8269 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
8270 szInstr));
8271
8272 if (LogIs3Enabled())
8273 DBGFR3Info(pVCpu->pVMR3, "cpumguest", "verbose", NULL);
8274 }
8275 else
8276# endif
8277 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
8278 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
8279#endif
8280
8281 /*
8282 * Do the decoding and emulation.
8283 */
8284 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
8285 if (rcStrict == VINF_SUCCESS)
8286 rcStrict = iemExecOneInner(pVCpu, pIemCpu);
8287
8288#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
8289 /*
8290 * Assert some sanity.
8291 */
8292 iemExecVerificationModeCheck(pIemCpu);
8293#endif
8294 if (rcStrict != VINF_SUCCESS)
8295 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8296 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8297 return rcStrict;
8298}
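
/* A minimal usage sketch (illustrative only; the calling loop is an assumption,
   not code from this file): an EM-style caller would typically keep stepping
   while IEM reports plain success and pass any other status back up:

       VBOXSTRICTRC rcStrict;
       do
           rcStrict = IEMExecOne(pVCpu);
       while (rcStrict == VINF_SUCCESS);
       return VBOXSTRICTRC_TODO(rcStrict);
*/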
8299
8300
8301VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
8302{
8303 PIEMCPU pIemCpu = &pVCpu->iem.s;
8304 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8305 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8306
8307 iemInitDecoder(pIemCpu);
8308 uint32_t const cbOldWritten = pIemCpu->cbWritten;
8309
8310 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
8311 if (rcStrict == VINF_SUCCESS)
8312 {
8313 rcStrict = iemExecOneInner(pVCpu, pIemCpu);
8314 if (pcbWritten)
8315 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
8316 }
8317 return rcStrict;
8318}
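
/* Illustrative sketch (hypothetical caller, not from this file): the pcbWritten
   output can be used to tell whether the emulated instruction wrote guest memory:

       uint32_t cbWritten = 0;
       VBOXSTRICTRC rcStrict = IEMExecOneEx(pVCpu, CPUMCTX2CORE(pCtx), &cbWritten);
       if (rcStrict == VINF_SUCCESS && cbWritten == 0)
           Log(("Instruction completed without writing guest memory\n"));
*/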
8319
8320
8321VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
8322 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
8323{
8324 PIEMCPU pIemCpu = &pVCpu->iem.s;
8325 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8326 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8327
8328 VBOXSTRICTRC rcStrict;
8329 if ( cbOpcodeBytes
8330 && pCtx->rip == OpcodeBytesPC)
8331 {
8332 iemInitDecoder(pIemCpu);
8333 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
8334 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
8335 rcStrict = VINF_SUCCESS;
8336 }
8337 else
8338 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
8339 if (rcStrict == VINF_SUCCESS)
8340 {
8341 rcStrict = iemExecOneInner(pVCpu, pIemCpu);
8342 }
8343 return rcStrict;
8344}
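
/* Illustrative sketch (hypothetical caller, not from this file): when the
   instruction bytes at the current RIP have already been read, e.g. by a page
   fault handler, they can be handed to IEM so it need not fetch them again:

       uint8_t abInstr[16];
       // ... abInstr filled from guest memory at pCtx->rip ...
       VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx),
                                                            pCtx->rip, abInstr, sizeof(abInstr));
*/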
8345
8346
8347VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
8348{
8349 return IEMExecOne(pVCpu);
8350}
8351
8352
8353
8354/**
8355 * Injects a trap, fault, abort, software interrupt or external interrupt.
8356 *
8357 * The parameter list matches TRPMQueryTrapAll pretty closely.
8358 *
8359 * @returns Strict VBox status code.
8360 * @param pVCpu The current virtual CPU.
8361 * @param u8TrapNo The trap number.
8362 * @param enmType What type is it (trap/fault/abort), software
8363 * interrupt or hardware interrupt.
8364 * @param uErrCode The error code if applicable.
8365 * @param uCr2 The CR2 value if applicable.
8366 */
8367VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
8368{
8369 iemInitDecoder(&pVCpu->iem.s);
8370
8371 uint32_t fFlags;
8372 switch (enmType)
8373 {
8374 case TRPM_HARDWARE_INT:
8375 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
8376 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
8377 uErrCode = uCr2 = 0;
8378 break;
8379
8380 case TRPM_SOFTWARE_INT:
8381 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
8382 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
8383 uErrCode = uCr2 = 0;
8384 break;
8385
8386 case TRPM_TRAP:
8387 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
8388 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
8389 if (u8TrapNo == X86_XCPT_PF)
8390 fFlags |= IEM_XCPT_FLAGS_CR2;
8391 switch (u8TrapNo)
8392 {
8393 case X86_XCPT_DF:
8394 case X86_XCPT_TS:
8395 case X86_XCPT_NP:
8396 case X86_XCPT_SS:
 case X86_XCPT_GP: /* #GP also pushes an error code. */
8397 case X86_XCPT_PF:
8398 case X86_XCPT_AC:
8399 fFlags |= IEM_XCPT_FLAGS_ERR;
8400 break;
8401 }
8402 break;
8403
8404 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8405 }
8406
8407 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
8408}
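
/* Illustrative sketch (hypothetical caller; the TRPMQueryTrapAll signature is
   assumed here, not taken from this file): forwarding a pending TRPM event to IEM:

       uint8_t     u8TrapNo;
       TRPMEVENT   enmType;
       RTGCUINT    uErrCode;
       RTGCUINTPTR uCr2;
       int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2);
       if (RT_SUCCESS(rc))
           rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
*/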
8409
8410
8411VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
8412{
8413 return VERR_NOT_IMPLEMENTED;
8414}
8415
8416
8417VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
8418{
8419 return VERR_NOT_IMPLEMENTED;
8420}
8421
8422
8423#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
8424/**
8425 * Executes an IRET instruction with the default operand size.
8426 *
8427 * This is for PATM.
8428 *
8429 * @returns VBox status code.
8430 * @param pVCpu The current virtual CPU.
8431 * @param pCtxCore The register frame.
8432 */
8433VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
8434{
8435 PIEMCPU pIemCpu = &pVCpu->iem.s;
8436 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8437
8438 iemCtxCoreToCtx(pCtx, pCtxCore);
8439 iemInitDecoder(pIemCpu);
8440 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
8441 if (rcStrict == VINF_SUCCESS)
8442 iemCtxToCtxCore(pCtxCore, pCtx);
8443 else
8444 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8445 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8446 return rcStrict;
8447}
8448#endif
8449