VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 42705

Last change on this file since 42705 was 42704, checked in by vboxsync, 12 years ago

IEM: Eflags fixes, added API variants that bypasses access handlers, ...

1/* $Id: IEMAll.cpp 42704 2012-08-09 08:03:21Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of the IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
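/*
 * Illustrative sketch only (an assumption, not part of the original file): how
 * the logging levels described above typically map onto the VBox logging
 * macros once LOG_GROUP is set to LOG_GROUP_IEM.  The messages and variables
 * below are made up purely for illustration.
 *
 *     Log(("IEM: raising #GP(0) at %04x:%08RX64\n", uCs, uRip));   - level 1: major events
 *     LogFlow(("IEMExecOne: enter\n"));                            - flow: enter/exit info
 *     Log3(("IEMExecOne: rc=%Rrc rip=%08RX64\n", rc, uRip));       - level 3: detailed state
 *     Log4(("decode %04x:%08RX64: nop\n", uCs, uRip));             - level 4: mnemonics w/ EIP
 */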
73/*******************************************************************************
74* Header Files *
75*******************************************************************************/
76#define LOG_GROUP LOG_GROUP_IEM
77#include <VBox/vmm/iem.h>
78#include <VBox/vmm/cpum.h>
79#include <VBox/vmm/pgm.h>
80#include <internal/pgm.h>
81#include <VBox/vmm/iom.h>
82#include <VBox/vmm/em.h>
83#include <VBox/vmm/tm.h>
84#include <VBox/vmm/dbgf.h>
85#ifdef VBOX_WITH_RAW_MODE_NOT_R0
86# include <VBox/vmm/patm.h>
87#endif
88#ifdef IEM_VERIFICATION_MODE
89# include <VBox/vmm/rem.h>
90# include <VBox/vmm/mm.h>
91#endif
92#include "IEMInternal.h"
93#include <VBox/vmm/vm.h>
94#include <VBox/log.h>
95#include <VBox/err.h>
96#include <VBox/param.h>
97#include <iprt/assert.h>
98#include <iprt/string.h>
99#include <iprt/x86.h>
100
101
102/*******************************************************************************
103* Structures and Typedefs *
104*******************************************************************************/
105/** @typedef PFNIEMOP
106 * Pointer to an opcode decoder function.
107 */
108
109/** @def FNIEMOP_DEF
110 * Define an opcode decoder function.
111 *
112 * We're using macros for this so that adding and removing parameters as well as
113 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
114 *
115 * @param a_Name The function name.
116 */
117
118
119#if defined(__GNUC__) && defined(RT_ARCH_X86)
120typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
121# define FNIEMOP_DEF(a_Name) \
122 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
123# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
124 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
125# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
126 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
127
128#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
129typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
130# define FNIEMOP_DEF(a_Name) \
131 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
132# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
133 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
134# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
135 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
136
137#elif defined(__GNUC__)
138typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
139# define FNIEMOP_DEF(a_Name) \
140 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
141# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
142 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
143# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
144 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
145
146#else
147typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
148# define FNIEMOP_DEF(a_Name) \
149 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
150# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
151 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
152# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
153 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
154
155#endif
156
157
158/**
159 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
160 */
161typedef union IEMSELDESC
162{
163 /** The legacy view. */
164 X86DESC Legacy;
165 /** The long mode view. */
166 X86DESC64 Long;
167} IEMSELDESC;
168/** Pointer to a selector descriptor table entry. */
169typedef IEMSELDESC *PIEMSELDESC;
170
171
172/*******************************************************************************
173* Defined Constants And Macros *
174*******************************************************************************/
175/** @def IEM_LOG_MEMORY_ACCESS
176 * Can be used to log memory accesses when debugging problematic guest behavior. */
177#if 0
178# define IEM_LOG_MEMORY_ACCESS
179#endif
180
181/** @name IEM status codes.
182 *
183 * Not quite sure how this will play out in the end, just aliasing safe status
184 * codes for now.
185 *
186 * @{ */
187#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
188/** @} */
189
190/** Temporary hack to disable the double execution. Will be removed in favor
191 * of a dedicated execution mode in EM. */
192//#define IEM_VERIFICATION_MODE_NO_REM
193
194/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
195 * due to GCC lacking knowledge about the value range of a switch. */
196#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
197
198/**
199 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
200 * occasion.
201 */
202#ifdef LOG_ENABLED
203# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
204 do { \
205 Log(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
206 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
207 } while (0)
208#else
209# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
210 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
211#endif
212
213/**
214 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
215 * occasion using the supplied logger statement.
216 *
217 * @param a_LoggerArgs What to log on failure.
218 */
219#ifdef LOG_ENABLED
220# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
221 do { \
222 LogFunc(a_LoggerArgs); \
223 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
224 } while (0)
225#else
226# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
227 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
228#endif
229
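/*
 * Usage sketch (illustration only; the message and variable are assumptions):
 * the extra set of parentheses is required because a_LoggerArgs is pasted into
 * LogFunc as-is:
 *     IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("uSel=%#x not implemented\n", uSel));
 */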
230/**
231 * Call an opcode decoder function.
232 *
233 * We're using macros for this so that adding and removing parameters can be
234 * done as we please. See FNIEMOP_DEF.
235 */
236#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
237
238/**
239 * Call a common opcode decoder function taking one extra argument.
240 *
241 * We're using macros for this so that adding and removing parameters can be
242 * done as we please. See FNIEMOP_DEF_1.
243 */
244#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
245
246/**
247 * Call a common opcode decoder function taking two extra arguments.
248 *
249 * We're using macros for this so that adding and removing parameters can be
250 * done as we please. See FNIEMOP_DEF_2.
251 */
252#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
253
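/*
 * Illustrative sketch only (not part of the original sources): how an opcode
 * decoder function is declared with FNIEMOP_DEF and dispatched via
 * FNIEMOP_CALL.  The function names and the trivial body below are assumptions
 * made purely for illustration.
 */
#if 0
FNIEMOP_DEF(iemOp_example_nop)
{
    /* A real decoder would advance RIP and commit any results here. */
    return VINF_SUCCESS;
}

static VBOXSTRICTRC iemExampleDispatchOneByte(PIEMCPU pIemCpu, uint8_t bOpcode)
{
    /* Dispatch through the one-byte opcode map (declared further down). */
    return FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
}
#endif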
254/**
255 * Check if we're currently executing in real or virtual 8086 mode.
256 *
257 * @returns @c true if it is, @c false if not.
258 * @param a_pIemCpu The IEM state of the current CPU.
259 */
260#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
261
262/**
263 * Check if we're currently executing in long mode.
264 *
265 * @returns @c true if it is, @c false if not.
266 * @param a_pIemCpu The IEM state of the current CPU.
267 */
268#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
269
270/**
271 * Check if we're currently executing in real mode.
272 *
273 * @returns @c true if it is, @c false if not.
274 * @param a_pIemCpu The IEM state of the current CPU.
275 */
276#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
277
278/**
279 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
280 */
281#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
282
283/**
284 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
285 */
286#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
287
288/**
289 * Tests if at least one of the specified AMD CPUID features (extended) is
290 * marked present.
291 */
292#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
293
294/**
295 * Checks if an Intel CPUID feature is present.
296 */
297#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
298 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
299 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
300
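/*
 * Usage sketch (illustration only): a decoder can gate an instruction on a
 * CPUID feature bit, e.g. RDTSC on the TSC flag special-cased by the macro
 * above:
 *     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
 *         return iemRaiseUndefinedOpcode(pIemCpu);   - helper assumed here for illustration
 */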
301/**
302 * Evaluates to true if we're presenting an Intel CPU to the guest.
303 */
304#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) (true) /** @todo determine this once and store it in the CPU structure */
305
306/**
307 * Evaluates to true if we're presenting an AMD CPU to the guest.
308 */
309#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) (false) /** @todo determine this once and store it in the CPU structure */
310
311/**
312 * Check if the address is canonical.
313 */
314#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
315
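/*
 * Worked example for the check above (illustration only): an address is
 * canonical iff bits 63:47 are all equal.  Adding 0x0000800000000000 shifts
 * both canonical ranges into [0, 2^48): 0xFFFF800000000000 + 0x0000800000000000
 * wraps to 0, which is below 2^48 (canonical), while 0x0000800000000000 +
 * 0x0000800000000000 = 0x0001000000000000, which is not below 2^48 (not
 * canonical).
 */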
316
317/*******************************************************************************
318* Global Variables *
319*******************************************************************************/
320extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
321
322
323/** Function table for the ADD instruction. */
324static const IEMOPBINSIZES g_iemAImpl_add =
325{
326 iemAImpl_add_u8, iemAImpl_add_u8_locked,
327 iemAImpl_add_u16, iemAImpl_add_u16_locked,
328 iemAImpl_add_u32, iemAImpl_add_u32_locked,
329 iemAImpl_add_u64, iemAImpl_add_u64_locked
330};
331
332/** Function table for the ADC instruction. */
333static const IEMOPBINSIZES g_iemAImpl_adc =
334{
335 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
336 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
337 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
338 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
339};
340
341/** Function table for the SUB instruction. */
342static const IEMOPBINSIZES g_iemAImpl_sub =
343{
344 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
345 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
346 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
347 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
348};
349
350/** Function table for the SBB instruction. */
351static const IEMOPBINSIZES g_iemAImpl_sbb =
352{
353 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
354 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
355 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
356 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
357};
358
359/** Function table for the OR instruction. */
360static const IEMOPBINSIZES g_iemAImpl_or =
361{
362 iemAImpl_or_u8, iemAImpl_or_u8_locked,
363 iemAImpl_or_u16, iemAImpl_or_u16_locked,
364 iemAImpl_or_u32, iemAImpl_or_u32_locked,
365 iemAImpl_or_u64, iemAImpl_or_u64_locked
366};
367
368/** Function table for the XOR instruction. */
369static const IEMOPBINSIZES g_iemAImpl_xor =
370{
371 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
372 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
373 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
374 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
375};
376
377/** Function table for the AND instruction. */
378static const IEMOPBINSIZES g_iemAImpl_and =
379{
380 iemAImpl_and_u8, iemAImpl_and_u8_locked,
381 iemAImpl_and_u16, iemAImpl_and_u16_locked,
382 iemAImpl_and_u32, iemAImpl_and_u32_locked,
383 iemAImpl_and_u64, iemAImpl_and_u64_locked
384};
385
386/** Function table for the CMP instruction.
387 * @remarks Making operand order ASSUMPTIONS.
388 */
389static const IEMOPBINSIZES g_iemAImpl_cmp =
390{
391 iemAImpl_cmp_u8, NULL,
392 iemAImpl_cmp_u16, NULL,
393 iemAImpl_cmp_u32, NULL,
394 iemAImpl_cmp_u64, NULL
395};
396
397/** Function table for the TEST instruction.
398 * @remarks Making operand order ASSUMPTIONS.
399 */
400static const IEMOPBINSIZES g_iemAImpl_test =
401{
402 iemAImpl_test_u8, NULL,
403 iemAImpl_test_u16, NULL,
404 iemAImpl_test_u32, NULL,
405 iemAImpl_test_u64, NULL
406};
407
408/** Function table for the BT instruction. */
409static const IEMOPBINSIZES g_iemAImpl_bt =
410{
411 NULL, NULL,
412 iemAImpl_bt_u16, NULL,
413 iemAImpl_bt_u32, NULL,
414 iemAImpl_bt_u64, NULL
415};
416
417/** Function table for the BTC instruction. */
418static const IEMOPBINSIZES g_iemAImpl_btc =
419{
420 NULL, NULL,
421 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
422 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
423 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
424};
425
426/** Function table for the BTR instruction. */
427static const IEMOPBINSIZES g_iemAImpl_btr =
428{
429 NULL, NULL,
430 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
431 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
432 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
433};
434
435/** Function table for the BTS instruction. */
436static const IEMOPBINSIZES g_iemAImpl_bts =
437{
438 NULL, NULL,
439 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
440 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
441 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
442};
443
444/** Function table for the BSF instruction. */
445static const IEMOPBINSIZES g_iemAImpl_bsf =
446{
447 NULL, NULL,
448 iemAImpl_bsf_u16, NULL,
449 iemAImpl_bsf_u32, NULL,
450 iemAImpl_bsf_u64, NULL
451};
452
453/** Function table for the BSR instruction. */
454static const IEMOPBINSIZES g_iemAImpl_bsr =
455{
456 NULL, NULL,
457 iemAImpl_bsr_u16, NULL,
458 iemAImpl_bsr_u32, NULL,
459 iemAImpl_bsr_u64, NULL
460};
461
462/** Function table for the IMUL instruction. */
463static const IEMOPBINSIZES g_iemAImpl_imul_two =
464{
465 NULL, NULL,
466 iemAImpl_imul_two_u16, NULL,
467 iemAImpl_imul_two_u32, NULL,
468 iemAImpl_imul_two_u64, NULL
469};
470
471/** Group 1 /r lookup table. */
472static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
473{
474 &g_iemAImpl_add,
475 &g_iemAImpl_or,
476 &g_iemAImpl_adc,
477 &g_iemAImpl_sbb,
478 &g_iemAImpl_and,
479 &g_iemAImpl_sub,
480 &g_iemAImpl_xor,
481 &g_iemAImpl_cmp
482};
483
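/*
 * Illustrative sketch only (the variable names are assumptions): the group 1
 * opcodes (0x80..0x83) select their implementation from this table using the
 * reg field, bits 5:3, of the ModR/M byte:
 */
#if 0
PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
#endif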
484/** Function table for the INC instruction. */
485static const IEMOPUNARYSIZES g_iemAImpl_inc =
486{
487 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
488 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
489 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
490 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
491};
492
493/** Function table for the DEC instruction. */
494static const IEMOPUNARYSIZES g_iemAImpl_dec =
495{
496 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
497 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
498 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
499 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
500};
501
502/** Function table for the NEG instruction. */
503static const IEMOPUNARYSIZES g_iemAImpl_neg =
504{
505 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
506 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
507 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
508 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
509};
510
511/** Function table for the NOT instruction. */
512static const IEMOPUNARYSIZES g_iemAImpl_not =
513{
514 iemAImpl_not_u8, iemAImpl_not_u8_locked,
515 iemAImpl_not_u16, iemAImpl_not_u16_locked,
516 iemAImpl_not_u32, iemAImpl_not_u32_locked,
517 iemAImpl_not_u64, iemAImpl_not_u64_locked
518};
519
520
521/** Function table for the ROL instruction. */
522static const IEMOPSHIFTSIZES g_iemAImpl_rol =
523{
524 iemAImpl_rol_u8,
525 iemAImpl_rol_u16,
526 iemAImpl_rol_u32,
527 iemAImpl_rol_u64
528};
529
530/** Function table for the ROR instruction. */
531static const IEMOPSHIFTSIZES g_iemAImpl_ror =
532{
533 iemAImpl_ror_u8,
534 iemAImpl_ror_u16,
535 iemAImpl_ror_u32,
536 iemAImpl_ror_u64
537};
538
539/** Function table for the RCL instruction. */
540static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
541{
542 iemAImpl_rcl_u8,
543 iemAImpl_rcl_u16,
544 iemAImpl_rcl_u32,
545 iemAImpl_rcl_u64
546};
547
548/** Function table for the RCR instruction. */
549static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
550{
551 iemAImpl_rcr_u8,
552 iemAImpl_rcr_u16,
553 iemAImpl_rcr_u32,
554 iemAImpl_rcr_u64
555};
556
557/** Function table for the SHL instruction. */
558static const IEMOPSHIFTSIZES g_iemAImpl_shl =
559{
560 iemAImpl_shl_u8,
561 iemAImpl_shl_u16,
562 iemAImpl_shl_u32,
563 iemAImpl_shl_u64
564};
565
566/** Function table for the SHR instruction. */
567static const IEMOPSHIFTSIZES g_iemAImpl_shr =
568{
569 iemAImpl_shr_u8,
570 iemAImpl_shr_u16,
571 iemAImpl_shr_u32,
572 iemAImpl_shr_u64
573};
574
575/** Function table for the SAR instruction. */
576static const IEMOPSHIFTSIZES g_iemAImpl_sar =
577{
578 iemAImpl_sar_u8,
579 iemAImpl_sar_u16,
580 iemAImpl_sar_u32,
581 iemAImpl_sar_u64
582};
583
584
585/** Function table for the MUL instruction. */
586static const IEMOPMULDIVSIZES g_iemAImpl_mul =
587{
588 iemAImpl_mul_u8,
589 iemAImpl_mul_u16,
590 iemAImpl_mul_u32,
591 iemAImpl_mul_u64
592};
593
594/** Function table for the IMUL instruction working implicitly on rAX. */
595static const IEMOPMULDIVSIZES g_iemAImpl_imul =
596{
597 iemAImpl_imul_u8,
598 iemAImpl_imul_u16,
599 iemAImpl_imul_u32,
600 iemAImpl_imul_u64
601};
602
603/** Function table for the DIV instruction. */
604static const IEMOPMULDIVSIZES g_iemAImpl_div =
605{
606 iemAImpl_div_u8,
607 iemAImpl_div_u16,
608 iemAImpl_div_u32,
609 iemAImpl_div_u64
610};
611
612/** Function table for the IDIV instruction. */
613static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
614{
615 iemAImpl_idiv_u8,
616 iemAImpl_idiv_u16,
617 iemAImpl_idiv_u32,
618 iemAImpl_idiv_u64
619};
620
621/** Function table for the SHLD instruction. */
622static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
623{
624 iemAImpl_shld_u16,
625 iemAImpl_shld_u32,
626 iemAImpl_shld_u64,
627};
628
629/** Function table for the SHRD instruction. */
630static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
631{
632 iemAImpl_shrd_u16,
633 iemAImpl_shrd_u32,
634 iemAImpl_shrd_u64,
635};
636
637
638/*******************************************************************************
639* Internal Functions *
640*******************************************************************************/
641static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
642/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
643static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
644static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
645static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
646static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
647static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
648static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
649static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
650static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
651static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
652static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
653static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
654static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
655static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
656static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
657static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
658static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
659static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
660static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
661static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
662static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
663static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
664static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
665
666#ifdef IEM_VERIFICATION_MODE
667static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
668#endif
669static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
670static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
671
672
673/**
674 * Sets the pass up status.
675 *
676 * @returns VINF_SUCCESS.
677 * @param pIemCpu The per CPU IEM state of the calling thread.
678 * @param rcPassUp The pass up status. Must be informational.
679 * VINF_SUCCESS is not allowed.
680 */
681static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
682{
683 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
684
685 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
686 if (rcOldPassUp == VINF_SUCCESS)
687 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
688 /* If both are EM scheduling code, use EM priority rules. */
689 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
690 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
691 {
692 if (rcPassUp < rcOldPassUp)
693 {
694 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
695 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
696 }
697 else
698 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
699 }
700 /* Override EM scheduling with specific status code. */
701 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
702 {
703 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
704 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
705 }
706 /* Don't override specific status code, first come first served. */
707 else
708 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
709 return VINF_SUCCESS;
710}
711
712
713/**
714 * Initializes the decoder state.
715 *
716 * @param pIemCpu The per CPU IEM state.
717 * @param fBypassHandlers Whether to bypass access handlers.
718 */
719DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
720{
721 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
722 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
723
724#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
725 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
726 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
727 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
728 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
729 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
730 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
731 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
732 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
733#endif
734
735#ifdef VBOX_WITH_RAW_MODE_NOT_R0
736 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
737#endif
738 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
739#ifdef IEM_VERIFICATION_MODE
740 if (pIemCpu->uInjectCpl != UINT8_MAX)
741 pIemCpu->uCpl = pIemCpu->uInjectCpl;
742#endif
743 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
744 ? IEMMODE_64BIT
745 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
746 ? IEMMODE_32BIT
747 : IEMMODE_16BIT;
748 pIemCpu->enmCpuMode = enmMode;
749 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
750 pIemCpu->enmEffAddrMode = enmMode;
751 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
752 pIemCpu->enmEffOpSize = enmMode;
753 pIemCpu->fPrefixes = 0;
754 pIemCpu->uRexReg = 0;
755 pIemCpu->uRexB = 0;
756 pIemCpu->uRexIndex = 0;
757 pIemCpu->iEffSeg = X86_SREG_DS;
758 pIemCpu->offOpcode = 0;
759 pIemCpu->cbOpcode = 0;
760 pIemCpu->cActiveMappings = 0;
761 pIemCpu->iNextMapping = 0;
762 pIemCpu->rcPassUp = VINF_SUCCESS;
763 pIemCpu->fBypassHandlers = fBypassHandlers;
764
765}
766
767
768/**
769 * Prefetches opcodes when starting to execute for the first time.
770 *
771 * @returns Strict VBox status code.
772 * @param pIemCpu The IEM state.
773 * @param fBypassHandlers Whether to bypass access handlers.
774 */
775static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
776{
777#ifdef IEM_VERIFICATION_MODE
778 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
779#endif
780 iemInitDecoder(pIemCpu, fBypassHandlers);
781
782 /*
783 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
784 *
785 * First translate CS:rIP to a physical address.
786 */
787 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
788 uint32_t cbToTryRead;
789 RTGCPTR GCPtrPC;
790 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
791 {
792 cbToTryRead = PAGE_SIZE;
793 GCPtrPC = pCtx->rip;
794 if (!IEM_IS_CANONICAL(GCPtrPC))
795 return iemRaiseGeneralProtectionFault0(pIemCpu);
796 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
797 }
798 else
799 {
800 uint32_t GCPtrPC32 = pCtx->eip;
801 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
802 if (GCPtrPC32 > pCtx->cs.u32Limit)
803 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
804 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
805 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
806 }
807
808#if defined(IN_RC) && defined(VBOX_WITH_RAW_MODE)
809 /* Allow interpretation of patch manager code blocks since they can for
810 instance throw #PFs for perfectly good reasons. */
811 if ( (pCtx->cs.Sel & X86_SEL_RPL) == 1
812 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), GCPtrPC))
813 {
814 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
815 if (cbToTryRead > cbLeftOnPage)
816 cbToTryRead = cbLeftOnPage;
817 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
818 cbToTryRead = sizeof(pIemCpu->abOpcode);
819 memcpy(pIemCpu->abOpcode, (void const *)(uintptr_t)GCPtrPC, cbToTryRead);
820 pIemCpu->cbOpcode = cbToTryRead;
821 return VINF_SUCCESS;
822 }
823#endif
824
825 RTGCPHYS GCPhys;
826 uint64_t fFlags;
827 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
828 if (RT_FAILURE(rc))
829 {
830 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
831 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
832 }
833 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
834 {
835 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
836 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
837 }
838 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
839 {
840 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
841 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
842 }
843 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
844 /** @todo Check reserved bits and such stuff. PGM is better at doing
845 * that, so do it when implementing the guest virtual address
846 * TLB... */
847
848#ifdef IEM_VERIFICATION_MODE
849 /*
850 * Optimistic optimization: Use unconsumed opcode bytes from the previous
851 * instruction.
852 */
853 /** @todo optimize this differently by not using PGMPhysRead. */
854 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
855 pIemCpu->GCPhysOpcodes = GCPhys;
856 if ( offPrevOpcodes < cbOldOpcodes
857 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
858 {
859 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
860 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
861 pIemCpu->cbOpcode = cbNew;
862 return VINF_SUCCESS;
863 }
864#endif
865
866 /*
867 * Read the bytes at this address.
868 */
869 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
870 if (cbToTryRead > cbLeftOnPage)
871 cbToTryRead = cbLeftOnPage;
872 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
873 cbToTryRead = sizeof(pIemCpu->abOpcode);
874 /** @todo PATM: Read original, unpatched bytes? EMAll.cpp doesn't seem to be
875 * doing that. */
876 if (!pIemCpu->fBypassHandlers)
877 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
878 else
879 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
880 if (rc != VINF_SUCCESS)
881 {
882 /** @todo status code handling */
883 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
884 GCPtrPC, GCPhys, cbToTryRead, rc));
885 return rc;
886 }
887 pIemCpu->cbOpcode = cbToTryRead;
888
889 return VINF_SUCCESS;
890}
891
892
893/**
894 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
895 * exception if it fails.
896 *
897 * @returns Strict VBox status code.
898 * @param pIemCpu The IEM state.
899 * @param cbMin The minimum number of opcode bytes to fetch.
900 */
901static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
902{
903 /*
904 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
905 *
906 * First translate CS:rIP to a physical address.
907 */
908 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
909 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
910 uint32_t cbToTryRead;
911 RTGCPTR GCPtrNext;
912 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
913 {
914 cbToTryRead = PAGE_SIZE;
915 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
916 if (!IEM_IS_CANONICAL(GCPtrNext))
917 return iemRaiseGeneralProtectionFault0(pIemCpu);
918 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
919 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
920 }
921 else
922 {
923 uint32_t GCPtrNext32 = pCtx->eip;
924 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
925 GCPtrNext32 += pIemCpu->cbOpcode;
926 if (GCPtrNext32 > pCtx->cs.u32Limit)
927 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
928 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
929 if (cbToTryRead < cbMin - cbLeft)
930 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
931 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
932 }
933
934 RTGCPHYS GCPhys;
935 uint64_t fFlags;
936 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
937 if (RT_FAILURE(rc))
938 {
939 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
940 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
941 }
942 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
943 {
944 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
945 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
946 }
947 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
948 {
949 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
950 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
951 }
952 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
953 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
954 /** @todo Check reserved bits and such stuff. PGM is better at doing
955 * that, so do it when implementing the guest virtual address
956 * TLB... */
957
958 /*
959 * Read the bytes at this address.
960 */
961 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
962 if (cbToTryRead > cbLeftOnPage)
963 cbToTryRead = cbLeftOnPage;
964 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
965 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
966 Assert(cbToTryRead >= cbMin - cbLeft);
967 if (!pIemCpu->fBypassHandlers)
968 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
969 else
970 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
971 if (rc != VINF_SUCCESS)
972 {
973 /** @todo status code handling */
974 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
975 return rc;
976 }
977 pIemCpu->cbOpcode += cbToTryRead;
978 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
979
980 return VINF_SUCCESS;
981}
982
983
984/**
985 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
986 *
987 * @returns Strict VBox status code.
988 * @param pIemCpu The IEM state.
989 * @param pb Where to return the opcode byte.
990 */
991DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
992{
993 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
994 if (rcStrict == VINF_SUCCESS)
995 {
996 uint8_t offOpcode = pIemCpu->offOpcode;
997 *pb = pIemCpu->abOpcode[offOpcode];
998 pIemCpu->offOpcode = offOpcode + 1;
999 }
1000 else
1001 *pb = 0;
1002 return rcStrict;
1003}
1004
1005
1006/**
1007 * Fetches the next opcode byte.
1008 *
1009 * @returns Strict VBox status code.
1010 * @param pIemCpu The IEM state.
1011 * @param pu8 Where to return the opcode byte.
1012 */
1013DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1014{
1015 uint8_t const offOpcode = pIemCpu->offOpcode;
1016 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1017 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1018
1019 *pu8 = pIemCpu->abOpcode[offOpcode];
1020 pIemCpu->offOpcode = offOpcode + 1;
1021 return VINF_SUCCESS;
1022}
1023
1024
1025/**
1026 * Fetches the next opcode byte, returns automatically on failure.
1027 *
1028 * @param a_pu8 Where to return the opcode byte.
1029 * @remark Implicitly references pIemCpu.
1030 */
1031#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1032 do \
1033 { \
1034 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1035 if (rcStrict2 != VINF_SUCCESS) \
1036 return rcStrict2; \
1037 } while (0)
1038
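/*
 * Illustrative sketch only (not original code): inside a decoder function the
 * macro fetches the next opcode byte and returns from the caller on failure,
 * so no explicit status checking is needed at the call site.
 */
#if 0
FNIEMOP_DEF(iemOp_example_group)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns the fetch status from iemOp_example_group on failure */
    /* ... decode the instruction using bRm ... */
    return VINF_SUCCESS;
}
#endif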
1039
1040/**
1041 * Fetches the next signed byte from the opcode stream.
1042 *
1043 * @returns Strict VBox status code.
1044 * @param pIemCpu The IEM state.
1045 * @param pi8 Where to return the signed byte.
1046 */
1047DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1048{
1049 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1050}
1051
1052
1053/**
1054 * Fetches the next signed byte from the opcode stream, returning automatically
1055 * on failure.
1056 *
1057 * @param a_pi8 Where to return the signed byte.
1058 * @remark Implicitly references pIemCpu.
1059 */
1060#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1061 do \
1062 { \
1063 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1064 if (rcStrict2 != VINF_SUCCESS) \
1065 return rcStrict2; \
1066 } while (0)
1067
1068
1069/**
1070 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1071 *
1072 * @returns Strict VBox status code.
1073 * @param pIemCpu The IEM state.
1074 * @param pu16 Where to return the opcode word.
1075 */
1076DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1077{
1078 uint8_t u8;
1079 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1080 if (rcStrict == VINF_SUCCESS)
1081 *pu16 = (int8_t)u8;
1082 return rcStrict;
1083}
1084
1085
1086/**
1087 * Fetches the next signed byte from the opcode stream, extending it to
1088 * unsigned 16-bit.
1089 *
1090 * @returns Strict VBox status code.
1091 * @param pIemCpu The IEM state.
1092 * @param pu16 Where to return the unsigned word.
1093 */
1094DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1095{
1096 uint8_t const offOpcode = pIemCpu->offOpcode;
1097 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1098 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1099
1100 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1101 pIemCpu->offOpcode = offOpcode + 1;
1102 return VINF_SUCCESS;
1103}
1104
1105
1106/**
1107 * Fetches the next signed byte from the opcode stream, sign-extending it to
1108 * a word, returning automatically on failure.
1109 *
1110 * @param a_pu16 Where to return the word.
1111 * @remark Implicitly references pIemCpu.
1112 */
1113#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1114 do \
1115 { \
1116 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1117 if (rcStrict2 != VINF_SUCCESS) \
1118 return rcStrict2; \
1119 } while (0)
1120
1121
1122/**
1123 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1124 *
1125 * @returns Strict VBox status code.
1126 * @param pIemCpu The IEM state.
1127 * @param pu32 Where to return the opcode dword.
1128 */
1129DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1130{
1131 uint8_t u8;
1132 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1133 if (rcStrict == VINF_SUCCESS)
1134 *pu32 = (int8_t)u8;
1135 return rcStrict;
1136}
1137
1138
1139/**
1140 * Fetches the next signed byte from the opcode stream, extending it to
1141 * unsigned 32-bit.
1142 *
1143 * @returns Strict VBox status code.
1144 * @param pIemCpu The IEM state.
1145 * @param pu32 Where to return the unsigned dword.
1146 */
1147DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1148{
1149 uint8_t const offOpcode = pIemCpu->offOpcode;
1150 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1151 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1152
1153 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1154 pIemCpu->offOpcode = offOpcode + 1;
1155 return VINF_SUCCESS;
1156}
1157
1158
1159/**
1160 * Fetches the next signed byte from the opcode stream, sign-extending it to
1161 * a double word, returning automatically on failure.
1162 *
1163 * @param a_pu32 Where to return the double word.
1164 * @remark Implicitly references pIemCpu.
1165 */
1166#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1167 do \
1168 { \
1169 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1170 if (rcStrict2 != VINF_SUCCESS) \
1171 return rcStrict2; \
1172 } while (0)
1173
1174
1175/**
1176 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1177 *
1178 * @returns Strict VBox status code.
1179 * @param pIemCpu The IEM state.
1180 * @param pu64 Where to return the opcode qword.
1181 */
1182DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1183{
1184 uint8_t u8;
1185 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1186 if (rcStrict == VINF_SUCCESS)
1187 *pu64 = (int8_t)u8;
1188 return rcStrict;
1189}
1190
1191
1192/**
1193 * Fetches the next signed byte from the opcode stream, extending it to
1194 * unsigned 64-bit.
1195 *
1196 * @returns Strict VBox status code.
1197 * @param pIemCpu The IEM state.
1198 * @param pu64 Where to return the unsigned qword.
1199 */
1200DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1201{
1202 uint8_t const offOpcode = pIemCpu->offOpcode;
1203 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1204 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1205
1206 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1207 pIemCpu->offOpcode = offOpcode + 1;
1208 return VINF_SUCCESS;
1209}
1210
1211
1212/**
1213 * Fetches the next signed byte from the opcode stream, sign-extending it to
1214 * a quad word, returning automatically on failure.
1215 *
1216 * @param a_pu64 Where to return the quad word.
1217 * @remark Implicitly references pIemCpu.
1218 */
1219#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1220 do \
1221 { \
1222 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1223 if (rcStrict2 != VINF_SUCCESS) \
1224 return rcStrict2; \
1225 } while (0)
1226
1227
1228/**
1229 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1230 *
1231 * @returns Strict VBox status code.
1232 * @param pIemCpu The IEM state.
1233 * @param pu16 Where to return the opcode word.
1234 */
1235DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1236{
1237 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1238 if (rcStrict == VINF_SUCCESS)
1239 {
1240 uint8_t offOpcode = pIemCpu->offOpcode;
1241 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1242 pIemCpu->offOpcode = offOpcode + 2;
1243 }
1244 else
1245 *pu16 = 0;
1246 return rcStrict;
1247}
1248
1249
1250/**
1251 * Fetches the next opcode word.
1252 *
1253 * @returns Strict VBox status code.
1254 * @param pIemCpu The IEM state.
1255 * @param pu16 Where to return the opcode word.
1256 */
1257DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1258{
1259 uint8_t const offOpcode = pIemCpu->offOpcode;
1260 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1261 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1262
1263 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1264 pIemCpu->offOpcode = offOpcode + 2;
1265 return VINF_SUCCESS;
1266}
1267
1268
1269/**
1270 * Fetches the next opcode word, returns automatically on failure.
1271 *
1272 * @param a_pu16 Where to return the opcode word.
1273 * @remark Implicitly references pIemCpu.
1274 */
1275#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1276 do \
1277 { \
1278 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1279 if (rcStrict2 != VINF_SUCCESS) \
1280 return rcStrict2; \
1281 } while (0)
1282
1283
1284/**
1285 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1286 *
1287 * @returns Strict VBox status code.
1288 * @param pIemCpu The IEM state.
1289 * @param pu32 Where to return the opcode double word.
1290 */
1291DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1292{
1293 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1294 if (rcStrict == VINF_SUCCESS)
1295 {
1296 uint8_t offOpcode = pIemCpu->offOpcode;
1297 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1298 pIemCpu->offOpcode = offOpcode + 2;
1299 }
1300 else
1301 *pu32 = 0;
1302 return rcStrict;
1303}
1304
1305
1306/**
1307 * Fetches the next opcode word, zero extending it to a double word.
1308 *
1309 * @returns Strict VBox status code.
1310 * @param pIemCpu The IEM state.
1311 * @param pu32 Where to return the opcode double word.
1312 */
1313DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1314{
1315 uint8_t const offOpcode = pIemCpu->offOpcode;
1316 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1317 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1318
1319 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1320 pIemCpu->offOpcode = offOpcode + 2;
1321 return VINF_SUCCESS;
1322}
1323
1324
1325/**
1326 * Fetches the next opcode word and zero extends it to a double word, returns
1327 * automatically on failure.
1328 *
1329 * @param a_pu32 Where to return the opcode double word.
1330 * @remark Implicitly references pIemCpu.
1331 */
1332#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1333 do \
1334 { \
1335 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1336 if (rcStrict2 != VINF_SUCCESS) \
1337 return rcStrict2; \
1338 } while (0)
1339
1340
1341/**
1342 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1343 *
1344 * @returns Strict VBox status code.
1345 * @param pIemCpu The IEM state.
1346 * @param pu64 Where to return the opcode quad word.
1347 */
1348DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1349{
1350 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1351 if (rcStrict == VINF_SUCCESS)
1352 {
1353 uint8_t offOpcode = pIemCpu->offOpcode;
1354 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1355 pIemCpu->offOpcode = offOpcode + 2;
1356 }
1357 else
1358 *pu64 = 0;
1359 return rcStrict;
1360}
1361
1362
1363/**
1364 * Fetches the next opcode word, zero extending it to a quad word.
1365 *
1366 * @returns Strict VBox status code.
1367 * @param pIemCpu The IEM state.
1368 * @param pu64 Where to return the opcode quad word.
1369 */
1370DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1371{
1372 uint8_t const offOpcode = pIemCpu->offOpcode;
1373 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1374 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1375
1376 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1377 pIemCpu->offOpcode = offOpcode + 2;
1378 return VINF_SUCCESS;
1379}
1380
1381
1382/**
1383 * Fetches the next opcode word and zero extends it to a quad word, returns
1384 * automatically on failure.
1385 *
1386 * @param a_pu64 Where to return the opcode quad word.
1387 * @remark Implicitly references pIemCpu.
1388 */
1389#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1390 do \
1391 { \
1392 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1393 if (rcStrict2 != VINF_SUCCESS) \
1394 return rcStrict2; \
1395 } while (0)
1396
1397
1398/**
1399 * Fetches the next signed word from the opcode stream.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pIemCpu The IEM state.
1403 * @param pi16 Where to return the signed word.
1404 */
1405DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1406{
1407 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1408}
1409
1410
1411/**
1412 * Fetches the next signed word from the opcode stream, returning automatically
1413 * on failure.
1414 *
1415 * @param a_pi16 Where to return the signed word.
1416 * @remark Implicitly references pIemCpu.
1417 */
1418#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1419 do \
1420 { \
1421 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1422 if (rcStrict2 != VINF_SUCCESS) \
1423 return rcStrict2; \
1424 } while (0)
1425
1426
1427/**
1428 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1429 *
1430 * @returns Strict VBox status code.
1431 * @param pIemCpu The IEM state.
1432 * @param pu32 Where to return the opcode dword.
1433 */
1434DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1435{
1436 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1437 if (rcStrict == VINF_SUCCESS)
1438 {
1439 uint8_t offOpcode = pIemCpu->offOpcode;
1440 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1441 pIemCpu->abOpcode[offOpcode + 1],
1442 pIemCpu->abOpcode[offOpcode + 2],
1443 pIemCpu->abOpcode[offOpcode + 3]);
1444 pIemCpu->offOpcode = offOpcode + 4;
1445 }
1446 else
1447 *pu32 = 0;
1448 return rcStrict;
1449}
1450
1451
1452/**
1453 * Fetches the next opcode dword.
1454 *
1455 * @returns Strict VBox status code.
1456 * @param pIemCpu The IEM state.
1457 * @param pu32 Where to return the opcode double word.
1458 */
1459DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1460{
1461 uint8_t const offOpcode = pIemCpu->offOpcode;
1462 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1463 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1464
1465 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1466 pIemCpu->abOpcode[offOpcode + 1],
1467 pIemCpu->abOpcode[offOpcode + 2],
1468 pIemCpu->abOpcode[offOpcode + 3]);
1469 pIemCpu->offOpcode = offOpcode + 4;
1470 return VINF_SUCCESS;
1471}
1472
1473
1474/**
1475 * Fetches the next opcode dword, returns automatically on failure.
1476 *
1477 * @param a_pu32 Where to return the opcode dword.
1478 * @remark Implicitly references pIemCpu.
1479 */
1480#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1481 do \
1482 { \
1483 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1484 if (rcStrict2 != VINF_SUCCESS) \
1485 return rcStrict2; \
1486 } while (0)
1487
1488
1489/**
1490 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1491 *
1492 * @returns Strict VBox status code.
1493 * @param pIemCpu The IEM state.
1494 * @param pu64 Where to return the opcode qword (the zero-extended dword).
1495 */
1496DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1497{
1498 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1499 if (rcStrict == VINF_SUCCESS)
1500 {
1501 uint8_t offOpcode = pIemCpu->offOpcode;
1502 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1503 pIemCpu->abOpcode[offOpcode + 1],
1504 pIemCpu->abOpcode[offOpcode + 2],
1505 pIemCpu->abOpcode[offOpcode + 3]);
1506 pIemCpu->offOpcode = offOpcode + 4;
1507 }
1508 else
1509 *pu64 = 0;
1510 return rcStrict;
1511}
1512
1513
1514/**
1515 * Fetches the next opcode dword, zero extending it to a quad word.
1516 *
1517 * @returns Strict VBox status code.
1518 * @param pIemCpu The IEM state.
1519 * @param pu64 Where to return the opcode quad word.
1520 */
1521DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1522{
1523 uint8_t const offOpcode = pIemCpu->offOpcode;
1524 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1525 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1526
1527 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1528 pIemCpu->abOpcode[offOpcode + 1],
1529 pIemCpu->abOpcode[offOpcode + 2],
1530 pIemCpu->abOpcode[offOpcode + 3]);
1531 pIemCpu->offOpcode = offOpcode + 4;
1532 return VINF_SUCCESS;
1533}
1534
1535
1536/**
1537 * Fetches the next opcode dword and zero extends it to a quad word, returns
1538 * automatically on failure.
1539 *
1540 * @param a_pu64 Where to return the opcode quad word.
1541 * @remark Implicitly references pIemCpu.
1542 */
1543#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1544 do \
1545 { \
1546 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1547 if (rcStrict2 != VINF_SUCCESS) \
1548 return rcStrict2; \
1549 } while (0)
1550
1551
1552/**
1553 * Fetches the next signed double word from the opcode stream.
1554 *
1555 * @returns Strict VBox status code.
1556 * @param pIemCpu The IEM state.
1557 * @param pi32 Where to return the signed double word.
1558 */
1559DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1560{
1561 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1562}
1563
1564/**
1565 * Fetches the next signed double word from the opcode stream, returning
1566 * automatically on failure.
1567 *
1568 * @param a_pi32 Where to return the signed double word.
1569 * @remark Implicitly references pIemCpu.
1570 */
1571#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1572 do \
1573 { \
1574 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1575 if (rcStrict2 != VINF_SUCCESS) \
1576 return rcStrict2; \
1577 } while (0)
1578
1579
1580/**
1581 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1582 *
1583 * @returns Strict VBox status code.
1584 * @param pIemCpu The IEM state.
1585 * @param pu64 Where to return the opcode qword.
1586 */
1587DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1588{
1589 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1590 if (rcStrict == VINF_SUCCESS)
1591 {
1592 uint8_t offOpcode = pIemCpu->offOpcode;
1593 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1594 pIemCpu->abOpcode[offOpcode + 1],
1595 pIemCpu->abOpcode[offOpcode + 2],
1596 pIemCpu->abOpcode[offOpcode + 3]);
1597 pIemCpu->offOpcode = offOpcode + 4;
1598 }
1599 else
1600 *pu64 = 0;
1601 return rcStrict;
1602}
1603
1604
1605/**
1606 * Fetches the next opcode dword, sign extending it into a quad word.
1607 *
1608 * @returns Strict VBox status code.
1609 * @param pIemCpu The IEM state.
1610 * @param pu64 Where to return the opcode quad word.
1611 */
1612DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1613{
1614 uint8_t const offOpcode = pIemCpu->offOpcode;
1615 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1616 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1617
1618 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1619 pIemCpu->abOpcode[offOpcode + 1],
1620 pIemCpu->abOpcode[offOpcode + 2],
1621 pIemCpu->abOpcode[offOpcode + 3]);
1622 *pu64 = i32;
1623 pIemCpu->offOpcode = offOpcode + 4;
1624 return VINF_SUCCESS;
1625}
1626
1627
1628/**
1629 * Fetches the next opcode double word and sign extends it to a quad word,
1630 * returns automatically on failure.
1631 *
1632 * @param a_pu64 Where to return the opcode quad word.
1633 * @remark Implicitly references pIemCpu.
1634 */
1635#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1636 do \
1637 { \
1638 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1639 if (rcStrict2 != VINF_SUCCESS) \
1640 return rcStrict2; \
1641 } while (0)
1642
1643
1644/**
1645 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1646 *
1647 * @returns Strict VBox status code.
1648 * @param pIemCpu The IEM state.
1649 * @param pu64 Where to return the opcode qword.
1650 */
1651DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1652{
1653 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1654 if (rcStrict == VINF_SUCCESS)
1655 {
1656 uint8_t offOpcode = pIemCpu->offOpcode;
1657 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1658 pIemCpu->abOpcode[offOpcode + 1],
1659 pIemCpu->abOpcode[offOpcode + 2],
1660 pIemCpu->abOpcode[offOpcode + 3],
1661 pIemCpu->abOpcode[offOpcode + 4],
1662 pIemCpu->abOpcode[offOpcode + 5],
1663 pIemCpu->abOpcode[offOpcode + 6],
1664 pIemCpu->abOpcode[offOpcode + 7]);
1665 pIemCpu->offOpcode = offOpcode + 8;
1666 }
1667 else
1668 *pu64 = 0;
1669 return rcStrict;
1670}
1671
1672
1673/**
1674 * Fetches the next opcode qword.
1675 *
1676 * @returns Strict VBox status code.
1677 * @param pIemCpu The IEM state.
1678 * @param pu64 Where to return the opcode qword.
1679 */
1680DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1681{
1682 uint8_t const offOpcode = pIemCpu->offOpcode;
1683 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1684 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1685
1686 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1687 pIemCpu->abOpcode[offOpcode + 1],
1688 pIemCpu->abOpcode[offOpcode + 2],
1689 pIemCpu->abOpcode[offOpcode + 3],
1690 pIemCpu->abOpcode[offOpcode + 4],
1691 pIemCpu->abOpcode[offOpcode + 5],
1692 pIemCpu->abOpcode[offOpcode + 6],
1693 pIemCpu->abOpcode[offOpcode + 7]);
1694 pIemCpu->offOpcode = offOpcode + 8;
1695 return VINF_SUCCESS;
1696}
1697
1698
1699/**
1700 * Fetches the next opcode quad word, returns automatically on failure.
1701 *
1702 * @param a_pu64 Where to return the opcode quad word.
1703 * @remark Implicitly references pIemCpu.
1704 */
1705#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1706 do \
1707 { \
1708 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1709 if (rcStrict2 != VINF_SUCCESS) \
1710 return rcStrict2; \
1711 } while (0)
1712
1713
1714/** @name Misc Worker Functions.
1715 * @{
1716 */
1717
1718
1719/**
1720 * Validates a new SS segment.
1721 *
1722 * @returns VBox strict status code.
1723 * @param pIemCpu The IEM per CPU instance data.
1724 * @param pCtx The CPU context.
1725 * @param NewSS The new SS selector.
1726 * @param uCpl The CPL to load the stack for.
1727 * @param pDesc Where to return the descriptor.
1728 */
1729static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1730{
1731 NOREF(pCtx);
1732
1733 /* Null selectors are not allowed (we're not called for dispatching
1734 interrupts with SS=0 in long mode). */
1735 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1736 {
1737 Log(("iemMiscValidateNewSSandRsp: #x - null selector -> #GP(0)\n", NewSS));
1738 return iemRaiseGeneralProtectionFault0(pIemCpu);
1739 }
1740
1741 /*
1742 * Read the descriptor.
1743 */
1744 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1745 if (rcStrict != VINF_SUCCESS)
1746 return rcStrict;
1747
1748 /*
1749 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1750 */
1751 if (!pDesc->Legacy.Gen.u1DescType)
1752 {
1753 Log(("iemMiscValidateNewSSandRsp: %#x - system selector -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1754 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1755 }
1756
1757 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1758 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1759 {
1760 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1761 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1762 }
1763 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1764 if ((NewSS & X86_SEL_RPL) != uCpl)
1765 {
1766 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #GP\n", NewSS, uCpl));
1767 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1768 }
1769 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1770 {
1771 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1772 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1773 }
1774
1775 /* Is it there? */
1776 /** @todo testcase: Is this checked before the canonical / limit check below? */
1777 if (!pDesc->Legacy.Gen.u1Present)
1778 {
1779 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1780 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1781 }
1782
1783 return VINF_SUCCESS;
1784}
1785
1786
1787/**
1788 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1789 * not.
1790 *
1791 * @param a_pIemCpu The IEM per CPU data.
1792 * @param a_pCtx The CPU context.
1793 */
1794#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1795# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1796 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
1797 ? (a_pCtx)->eflags.u \
1798 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
1799#else
1800# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1801 ( (a_pCtx)->eflags.u )
1802#endif
1803
1804/**
1805 * Updates the EFLAGS in the correct manner wrt. PATM.
1806 *
1807 * @param a_pIemCpu The IEM per CPU data.
1808 * @param a_pCtx The CPU context.
1809 */
1810#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1811# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1812 do { \
1813 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
1814 (a_pCtx)->eflags.u = (a_fEfl); \
1815 else \
1816 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
1817 } while (0)
1818#else
1819# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1820 do { \
1821 (a_pCtx)->eflags.u = (a_fEfl); \
1822 } while (0)
1823#endif
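
/* Usage sketch (illustrative only): EFLAGS modifications go through this macro
   pair so that raw-mode (PATM) guests are handled transparently, e.g.:
       uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
       fEfl &= ~X86_EFL_IF;
       IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
   This is the pattern the exception dispatchers below follow. */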
1824
1825
1826/** @} */
1827
1828/** @name Raising Exceptions.
1829 *
1830 * @{
1831 */
1832
1833/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1834 * @{ */
1835/** CPU exception. */
1836#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1837/** External interrupt (from PIC, APIC, whatever). */
1838#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1839/** Software interrupt (int, into or bound). */
1840#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1841/** Takes an error code. */
1842#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1843/** Takes a CR2. */
1844#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1845/** Generated by the breakpoint instruction. */
1846#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1847/** @} */
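
/* Informal examples of how these combine: iemRaisePageFault below uses
   IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2, the
   selector faults use IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, while a
   software INT would be dispatched with IEM_XCPT_FLAGS_T_SOFT_INT and neither
   an error code nor a CR2 value. */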
1848
1849/**
1850 * Loads the specified stack far pointer from the TSS.
1851 *
1852 * @returns VBox strict status code.
1853 * @param pIemCpu The IEM per CPU instance data.
1854 * @param pCtx The CPU context.
1855 * @param uCpl The CPL to load the stack for.
1856 * @param pSelSS Where to return the new stack segment.
1857 * @param puEsp Where to return the new stack pointer.
1858 */
1859static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1860 PRTSEL pSelSS, uint32_t *puEsp)
1861{
1862 VBOXSTRICTRC rcStrict;
1863 Assert(uCpl < 4);
1864 *puEsp = 0; /* make gcc happy */
1865 *pSelSS = 0; /* make gcc happy */
1866
1867 switch (pCtx->tr.Attr.n.u4Type)
1868 {
1869 /*
1870 * 16-bit TSS (X86TSS16).
1871 */
1872 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1873 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1874 {
1875 uint32_t off = uCpl * 4 + 2;
1876 if (off + 4 > pCtx->tr.u32Limit)
1877 {
1878 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
1879 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1880 }
1881
1882 uint32_t u32Tmp = 0; /* gcc maybe... */
1883 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1884 if (rcStrict == VINF_SUCCESS)
1885 {
1886 *puEsp = RT_LOWORD(u32Tmp);
1887 *pSelSS = RT_HIWORD(u32Tmp);
1888 return VINF_SUCCESS;
1889 }
1890 break;
1891 }
1892
1893 /*
1894 * 32-bit TSS (X86TSS32).
1895 */
1896 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1897 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1898 {
1899 uint32_t off = uCpl * 8 + 4;
1900 if (off + 7 > pCtx->tr.u32Limit)
1901 {
1902 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
1903 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1904 }
1905
1906 uint64_t u64Tmp;
1907 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1908 if (rcStrict == VINF_SUCCESS)
1909 {
1910 *puEsp = u64Tmp & UINT32_MAX;
1911 *pSelSS = (RTSEL)(u64Tmp >> 32);
1912 return VINF_SUCCESS;
1913 }
1914 break;
1915 }
1916
1917 default:
1918 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1919 }
1920 return rcStrict;
1921}
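
/* Worked example (informal): for uCpl=1 with a 32-bit TSS, off = 1*8 + 4 = 12,
   so the qword read at TR.base + 12 holds ESP1 in the low dword and SS1 in
   bits 32..47, matching the X86TSS32 layout; the 16-bit variant reads a dword
   at uCpl*4 + 2 and splits it into SP (low word) and SS (high word). */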
1922
1923
1924/**
1925 * Adjust the CPU state according to the exception being raised.
1926 *
1927 * @param pCtx The CPU context.
1928 * @param u8Vector The exception that has been raised.
1929 */
1930DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1931{
1932 switch (u8Vector)
1933 {
1934 case X86_XCPT_DB:
1935 pCtx->dr[7] &= ~X86_DR7_GD;
1936 break;
1937 /** @todo Read the AMD and Intel exception reference... */
1938 }
1939}
1940
1941
1942/**
1943 * Implements exceptions and interrupts for real mode.
1944 *
1945 * @returns VBox strict status code.
1946 * @param pIemCpu The IEM per CPU instance data.
1947 * @param pCtx The CPU context.
1948 * @param cbInstr The number of bytes to offset rIP by in the return
1949 * address.
1950 * @param u8Vector The interrupt / exception vector number.
1951 * @param fFlags The flags.
1952 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1953 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1954 */
1955static VBOXSTRICTRC
1956iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1957 PCPUMCTX pCtx,
1958 uint8_t cbInstr,
1959 uint8_t u8Vector,
1960 uint32_t fFlags,
1961 uint16_t uErr,
1962 uint64_t uCr2)
1963{
1964 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1965 NOREF(uErr); NOREF(uCr2);
1966
1967 /*
1968 * Read the IDT entry.
1969 */
1970 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1971 {
1972 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1973 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1974 }
1975 RTFAR16 Idte;
1976 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1977 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1978 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1979 return rcStrict;
1980
1981 /*
1982 * Push the stack frame.
1983 */
1984 uint16_t *pu16Frame;
1985 uint64_t uNewRsp;
1986 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1987 if (rcStrict != VINF_SUCCESS)
1988 return rcStrict;
1989
1990 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
1991 pu16Frame[2] = (uint16_t)fEfl;
1992 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
1993 pu16Frame[0] = pCtx->ip + cbInstr;
1994 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1995 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1996 return rcStrict;
1997
1998 /*
1999 * Load the vector address into cs:ip and make exception specific state
2000 * adjustments.
2001 */
2002 pCtx->cs.Sel = Idte.sel;
2003 pCtx->cs.ValidSel = Idte.sel;
2004 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2005 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2006 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2007 pCtx->rip = Idte.off;
2008 fEfl &= ~X86_EFL_IF;
2009 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2010
2011 /** @todo do we actually do this in real mode? */
2012 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2013 iemRaiseXcptAdjustState(pCtx, u8Vector);
2014
2015 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2016}
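
/* Informal recap of the real-mode path above: the IVT entry for vector N is the
   far pointer at idtr.pIdt + N*4 (offset word, then segment word), and the
   frame pushed is FLAGS, CS, IP (6 bytes) with IF cleared before control is
   transferred - e.g. vector 0x10 with IDTR.base=0 reads the 4 bytes at 0x40. */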
2017
2018
2019/**
2020 * Implements exceptions and interrupts for protected mode.
2021 *
2022 * @returns VBox strict status code.
2023 * @param pIemCpu The IEM per CPU instance data.
2024 * @param pCtx The CPU context.
2025 * @param cbInstr The number of bytes to offset rIP by in the return
2026 * address.
2027 * @param u8Vector The interrupt / exception vector number.
2028 * @param fFlags The flags.
2029 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2030 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2031 */
2032static VBOXSTRICTRC
2033iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
2034 PCPUMCTX pCtx,
2035 uint8_t cbInstr,
2036 uint8_t u8Vector,
2037 uint32_t fFlags,
2038 uint16_t uErr,
2039 uint64_t uCr2)
2040{
2041 NOREF(cbInstr);
2042
2043 /*
2044 * Read the IDT entry.
2045 */
2046 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2047 {
2048 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2049 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2050 }
2051 X86DESC Idte;
2052 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
2053 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
2054 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2055 return rcStrict;
2056 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2057 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2058 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2059
2060 /*
2061 * Check the descriptor type, DPL and such.
2062 * ASSUMES this is done in the same order as described for call-gate calls.
2063 */
2064 if (Idte.Gate.u1DescType)
2065 {
2066 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2067 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2068 }
2069 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2070 switch (Idte.Gate.u4Type)
2071 {
2072 case X86_SEL_TYPE_SYS_UNDEFINED:
2073 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2074 case X86_SEL_TYPE_SYS_LDT:
2075 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2076 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2077 case X86_SEL_TYPE_SYS_UNDEFINED2:
2078 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2079 case X86_SEL_TYPE_SYS_UNDEFINED3:
2080 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2081 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2082 case X86_SEL_TYPE_SYS_UNDEFINED4:
2083 {
2084 /** @todo check what actually happens when the type is wrong...
2085 * esp. call gates. */
2086 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2087 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2088 }
2089
2090 case X86_SEL_TYPE_SYS_286_INT_GATE:
2091 case X86_SEL_TYPE_SYS_386_INT_GATE:
2092 fEflToClear |= X86_EFL_IF;
2093 break;
2094
2095 case X86_SEL_TYPE_SYS_TASK_GATE:
2096 /** @todo task gates. */
2097 AssertFailedReturn(VERR_NOT_SUPPORTED);
2098
2099 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2100 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2101 break;
2102
2103 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2104 }
2105
2106 /* Check DPL against CPL if applicable. */
2107 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2108 {
2109 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2110 {
2111 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2112 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2113 }
2114 }
2115
2116 /* Is it there? */
2117 if (!Idte.Gate.u1Present)
2118 {
2119 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2120 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2121 }
2122
2123 /* A null CS is bad. */
2124 RTSEL NewCS = Idte.Gate.u16Sel;
2125 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2126 {
2127 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2128 return iemRaiseGeneralProtectionFault0(pIemCpu);
2129 }
2130
2131 /* Fetch the descriptor for the new CS. */
2132 IEMSELDESC DescCS;
2133 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2134 if (rcStrict != VINF_SUCCESS)
2135 {
2136 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2137 return rcStrict;
2138 }
2139
2140 /* Must be a code segment. */
2141 if (!DescCS.Legacy.Gen.u1DescType)
2142 {
2143 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2144 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2145 }
2146 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2147 {
2148 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2149 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2150 }
2151
2152 /* Don't allow lowering the privilege level. */
2153 /** @todo Does the lowering of privileges apply to software interrupts
2154 * only? This has bearings on the more-privileged or
2155 * same-privilege stack behavior further down. A testcase would
2156 * be nice. */
2157 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2158 {
2159 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2160 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2161 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2162 }
2163 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
2164
2165 /* Check the new EIP against the new CS limit. */
2166 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
2167 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
2168 ? Idte.Gate.u16OffsetLow
2169 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
2170 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2171 if (uNewEip > cbLimitCS)
2172 {
2173 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - EIP=%#x is out of bounds (limit=%#x) -> #GP\n",
2174 u8Vector, NewCS, uNewEip, cbLimitCS));
2175 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2176 }
2177
2178 /* Make sure the selector is present. */
2179 if (!DescCS.Legacy.Gen.u1Present)
2180 {
2181 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2182 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2183 }
2184
2185 /*
2186 * If the privilege level changes, we need to get a new stack from the TSS.
2187 * This in turns means validating the new SS and ESP...
2188 */
2189 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2190 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2191 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2192 if (uNewCpl != pIemCpu->uCpl)
2193 {
2194 RTSEL NewSS;
2195 uint32_t uNewEsp;
2196 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
2197 if (rcStrict != VINF_SUCCESS)
2198 return rcStrict;
2199
2200 IEMSELDESC DescSS;
2201 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
2202 if (rcStrict != VINF_SUCCESS)
2203 return rcStrict;
2204
2205 /* Check that there is sufficient space for the stack frame. */
2206 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2207 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
2208 {
2209 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
2210 }
2211
2212 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
2213 if ( uNewEsp - 1 > cbLimitSS
2214 || uNewEsp < cbStackFrame)
2215 {
2216 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
2217 u8Vector, NewSS, uNewEsp, cbStackFrame));
2218 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
2219 }
2220
2221 /*
2222 * Start making changes.
2223 */
2224
2225 /* Create the stack frame. */
2226 RTPTRUNION uStackFrame;
2227 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2228 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2229 if (rcStrict != VINF_SUCCESS)
2230 return rcStrict;
2231 void * const pvStackFrame = uStackFrame.pv;
2232
2233 if (fFlags & IEM_XCPT_FLAGS_ERR)
2234 *uStackFrame.pu32++ = uErr;
2235 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2236 ? pCtx->eip + cbInstr : pCtx->eip;
2237 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2238 uStackFrame.pu32[2] = fEfl;
2239 uStackFrame.pu32[3] = pCtx->esp;
2240 uStackFrame.pu32[4] = pCtx->ss.Sel;
2241 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2242 if (rcStrict != VINF_SUCCESS)
2243 return rcStrict;
2244
2245 /* Mark the selectors 'accessed' (hope this is the correct time). */
2246 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2247 * after pushing the stack frame? (Write protect the gdt + stack to
2248 * find out.) */
2249 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2250 {
2251 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2252 if (rcStrict != VINF_SUCCESS)
2253 return rcStrict;
2254 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2255 }
2256
2257 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2258 {
2259 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
2260 if (rcStrict != VINF_SUCCESS)
2261 return rcStrict;
2262 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2263 }
2264
2265 /*
2266 * Start committing the register changes (joins with the DPL=CPL branch).
2267 */
2268 pCtx->ss.Sel = NewSS;
2269 pCtx->ss.ValidSel = NewSS;
2270 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2271 pCtx->ss.u32Limit = cbLimitSS;
2272 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2273 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2274 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
2275 pIemCpu->uCpl = uNewCpl;
2276 }
2277 /*
2278 * Same privilege, no stack change and smaller stack frame.
2279 */
2280 else
2281 {
2282 uint64_t uNewRsp;
2283 RTPTRUNION uStackFrame;
2284 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
2285 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2286 if (rcStrict != VINF_SUCCESS)
2287 return rcStrict;
2288 void * const pvStackFrame = uStackFrame.pv;
2289
2290 if (fFlags & IEM_XCPT_FLAGS_ERR)
2291 *uStackFrame.pu32++ = uErr;
2292 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2293 ? pCtx->eip + cbInstr : pCtx->eip;
2294 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2295 uStackFrame.pu32[2] = fEfl;
2296 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2297 if (rcStrict != VINF_SUCCESS)
2298 return rcStrict;
2299
2300 /* Mark the CS selector as 'accessed'. */
2301 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2302 {
2303 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2304 if (rcStrict != VINF_SUCCESS)
2305 return rcStrict;
2306 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2307 }
2308
2309 /*
2310 * Start committing the register changes (joins with the other branch).
2311 */
2312 pCtx->rsp = uNewRsp;
2313 }
2314
2315 /* ... register committing continues. */
2316 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2317 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2318 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2319 pCtx->cs.u32Limit = cbLimitCS;
2320 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2321 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2322
2323 pCtx->rip = uNewEip;
2324 fEfl &= ~fEflToClear;
2325 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2326
2327 if (fFlags & IEM_XCPT_FLAGS_CR2)
2328 pCtx->cr2 = uCr2;
2329
2330 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2331 iemRaiseXcptAdjustState(pCtx, u8Vector);
2332
2333 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2334}
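
/* Informal summary of the frames built above: on a privilege change the new
   stack receives SS, ESP, EFLAGS, CS, EIP and optionally the error code (20 or
   24 bytes); the same-privilege path pushes only EFLAGS, CS, EIP plus the
   optional error code (12 or 16 bytes). In both cases CS.RPL is set to the new
   CPL and EIP is taken from the gate offset. */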
2335
2336
2337/**
2338 * Implements exceptions and interrupts for V8086 mode.
2339 *
2340 * @returns VBox strict status code.
2341 * @param pIemCpu The IEM per CPU instance data.
2342 * @param pCtx The CPU context.
2343 * @param cbInstr The number of bytes to offset rIP by in the return
2344 * address.
2345 * @param u8Vector The interrupt / exception vector number.
2346 * @param fFlags The flags.
2347 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2348 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2349 */
2350static VBOXSTRICTRC
2351iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2352 PCPUMCTX pCtx,
2353 uint8_t cbInstr,
2354 uint8_t u8Vector,
2355 uint32_t fFlags,
2356 uint16_t uErr,
2357 uint64_t uCr2)
2358{
2359 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2360 /** @todo implement me. */
2361 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n"));
2362}
2363
2364
2365/**
2366 * Implements exceptions and interrupts for long mode.
2367 *
2368 * @returns VBox strict status code.
2369 * @param pIemCpu The IEM per CPU instance data.
2370 * @param pCtx The CPU context.
2371 * @param cbInstr The number of bytes to offset rIP by in the return
2372 * address.
2373 * @param u8Vector The interrupt / exception vector number.
2374 * @param fFlags The flags.
2375 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2376 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2377 */
2378static VBOXSTRICTRC
2379iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2380 PCPUMCTX pCtx,
2381 uint8_t cbInstr,
2382 uint8_t u8Vector,
2383 uint32_t fFlags,
2384 uint16_t uErr,
2385 uint64_t uCr2)
2386{
2387 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2388 /** @todo implement me. */
2389 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("long mode exception / interrupt dispatching\n"));
2390}
2391
2392
2393/**
2394 * Implements exceptions and interrupts.
2395 *
2396 * All exceptions and interrupts go through this function!
2397 *
2398 * @returns VBox strict status code.
2399 * @param pIemCpu The IEM per CPU instance data.
2400 * @param cbInstr The number of bytes to offset rIP by in the return
2401 * address.
2402 * @param u8Vector The interrupt / exception vector number.
2403 * @param fFlags The flags.
2404 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2405 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2406 */
2407DECL_NO_INLINE(static, VBOXSTRICTRC)
2408iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2409 uint8_t cbInstr,
2410 uint8_t u8Vector,
2411 uint32_t fFlags,
2412 uint16_t uErr,
2413 uint64_t uCr2)
2414{
2415 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2416
2417 /*
2418 * Do recursion accounting.
2419 */
2420 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2421 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2422 if (pIemCpu->cXcptRecursions == 0)
2423 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2424 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2425 else
2426 {
2427 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2428 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2429
2430 /** @todo double and triple faults. */
2431 if (pIemCpu->cXcptRecursions >= 3)
2432 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2433
2434 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2435 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2436 {
2437 ....
2438 } */
2439 }
2440 pIemCpu->cXcptRecursions++;
2441 pIemCpu->uCurXcpt = u8Vector;
2442 pIemCpu->fCurXcpt = fFlags;
2443
2444 /*
2445 * Extensive logging.
2446 */
2447#if defined(LOG_ENABLED) && defined(IN_RING3)
2448 if (LogIs3Enabled())
2449 {
2450 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2451 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2452 char szRegs[4096];
2453 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2454 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2455 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2456 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2457 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2458 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2459 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2460 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2461 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2462 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2463 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2464 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2465 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2466 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2467 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2468 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2469 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2470 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2471 " efer=%016VR{efer}\n"
2472 " pat=%016VR{pat}\n"
2473 " sf_mask=%016VR{sf_mask}\n"
2474 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2475 " lstar=%016VR{lstar}\n"
2476 " star=%016VR{star} cstar=%016VR{cstar}\n"
2477 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2478 );
2479
2480 char szInstr[256];
2481 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2482 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2483 szInstr, sizeof(szInstr), NULL);
2484 Log3(("%s%s\n", szRegs, szInstr));
2485 }
2486#endif /* LOG_ENABLED */
2487
2488 /*
2489 * Call the mode specific worker function.
2490 */
2491 VBOXSTRICTRC rcStrict;
2492 if (!(pCtx->cr0 & X86_CR0_PE))
2493 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2494 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2495 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2496 else if (!pCtx->eflags.Bits.u1VM)
2497 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2498 else
2499 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2500
2501 /*
2502 * Unwind.
2503 */
2504 pIemCpu->cXcptRecursions--;
2505 pIemCpu->uCurXcpt = uPrevXcpt;
2506 pIemCpu->fCurXcpt = fPrevXcpt;
2507 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
2508 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
2509 return rcStrict;
2510}
2511
2512
2513/** \#DE - 00. */
2514DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2515{
2516 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2517}
2518
2519
2520/** \#DB - 01. */
2521DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2522{
2523 /** @todo set/clear RF. */
2524 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2525}
2526
2527
2528/** \#UD - 06. */
2529DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2530{
2531 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2532}
2533
2534
2535/** \#NM - 07. */
2536DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2537{
2538 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2539}
2540
2541
2542#ifdef SOME_UNUSED_FUNCTION
2543/** \#TS(err) - 0a. */
2544DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2545{
2546 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2547}
2548#endif
2549
2550
2551/** \#TS(tr) - 0a. */
2552DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2553{
2554 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2555 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
2556}
2557
2558
2559/** \#NP(err) - 0b. */
2560DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2561{
2562 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2563}
2564
2565
2566/** \#NP(seg) - 0b. */
2567DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2568{
2569 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2570 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2571}
2572
2573
2574/** \#NP(sel) - 0b. */
2575DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2576{
2577 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2578 uSel & ~X86_SEL_RPL, 0);
2579}
2580
2581
2582/** \#SS(seg) - 0c. */
2583DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2584{
2585 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2586 uSel & ~X86_SEL_RPL, 0);
2587}
2588
2589
2590/** \#GP(n) - 0d. */
2591DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2592{
2593 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2594}
2595
2596
2597/** \#GP(0) - 0d. */
2598DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2599{
2600 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2601}
2602
2603
2604/** \#GP(sel) - 0d. */
2605DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2606{
2607 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2608 Sel & ~X86_SEL_RPL, 0);
2609}
2610
2611
2612/** \#GP(0) - 0d. */
2613DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2614{
2615 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2616}
2617
2618
2619/** \#GP(sel) - 0d. */
2620DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2621{
2622 NOREF(iSegReg); NOREF(fAccess);
2623 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2624 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2625}
2626
2627
2628/** \#GP(sel) - 0d. */
2629DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2630{
2631 NOREF(Sel);
2632 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2633}
2634
2635
2636/** \#GP(sel) - 0d. */
2637DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2638{
2639 NOREF(iSegReg); NOREF(fAccess);
2640 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2641}
2642
2643
2644/** \#PF(n) - 0e. */
2645DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2646{
2647 uint16_t uErr;
2648 switch (rc)
2649 {
2650 case VERR_PAGE_NOT_PRESENT:
2651 case VERR_PAGE_TABLE_NOT_PRESENT:
2652 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2653 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2654 uErr = 0;
2655 break;
2656
2657 default:
2658 AssertMsgFailed(("%Rrc\n", rc));
2659 case VERR_ACCESS_DENIED:
2660 uErr = X86_TRAP_PF_P;
2661 break;
2662
2663 /** @todo reserved */
2664 }
2665
2666 if (pIemCpu->uCpl == 3)
2667 uErr |= X86_TRAP_PF_US;
2668
2669 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2670 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2671 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2672 uErr |= X86_TRAP_PF_ID;
2673
2674 /* Note! RW access callers reporting a WRITE protection fault will clear
2675 the READ flag before calling. So, read-modify-write accesses (RW)
2676 can safely be reported as READ faults. */
2677 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2678 uErr |= X86_TRAP_PF_RW;
2679
2680 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2681 uErr, GCPtrWhere);
2682}
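
/* Informal example: a CPL=3 write to a present read-only page arrives here with
   VERR_ACCESS_DENIED and IEM_ACCESS_TYPE_WRITE, producing an error code of
   X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW; a code fetch additionally
   gets X86_TRAP_PF_ID when both CR4.PAE and EFER.NXE are set. */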
2683
2684
2685/** \#MF(0) - 10. */
2686DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2687{
2688 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2689}
2690
2691
2692/** \#AC(0) - 11. */
2693DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
2694{
2695 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2696}
2697
2698
2699/**
2700 * Macro for calling iemCImplRaiseDivideError().
2701 *
2702 * This enables us to add/remove arguments and force different levels of
2703 * inlining as we wish.
2704 *
2705 * @return Strict VBox status code.
2706 */
2707#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2708IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2709{
2710 NOREF(cbInstr);
2711 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2712}
2713
2714
2715/**
2716 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2717 *
2718 * This enables us to add/remove arguments and force different levels of
2719 * inlining as we wish.
2720 *
2721 * @return Strict VBox status code.
2722 */
2723#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2724IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2725{
2726 NOREF(cbInstr);
2727 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2728}
2729
2730
2731/**
2732 * Macro for calling iemCImplRaiseInvalidOpcode().
2733 *
2734 * This enables us to add/remove arguments and force different levels of
2735 * inlining as we wish.
2736 *
2737 * @return Strict VBox status code.
2738 */
2739#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2740IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2741{
2742 NOREF(cbInstr);
2743 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2744}
2745
2746
2747/** @} */
2748
2749
2750/*
2751 *
2752 * Helper routines.
2753 * Helper routines.
2754 * Helper routines.
2755 *
2756 */
2757
2758/**
2759 * Recalculates the effective operand size.
2760 *
2761 * @param pIemCpu The IEM state.
2762 */
2763static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2764{
2765 switch (pIemCpu->enmCpuMode)
2766 {
2767 case IEMMODE_16BIT:
2768 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2769 break;
2770 case IEMMODE_32BIT:
2771 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2772 break;
2773 case IEMMODE_64BIT:
2774 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2775 {
2776 case 0:
2777 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2778 break;
2779 case IEM_OP_PRF_SIZE_OP:
2780 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2781 break;
2782 case IEM_OP_PRF_SIZE_REX_W:
2783 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2784 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2785 break;
2786 }
2787 break;
2788 default:
2789 AssertFailed();
2790 }
2791}
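
/* Informal examples of the above: in 64-bit mode REX.W forces a 64-bit
   effective operand size even when 0x66 is also present, 0x66 alone gives
   16-bit, and no size prefix yields the default (32-bit for most instructions);
   in 16-bit and 32-bit modes 0x66 simply toggles between 16- and 32-bit. */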
2792
2793
2794/**
2795 * Sets the default operand size to 64-bit and recalculates the effective
2796 * operand size.
2797 *
2798 * @param pIemCpu The IEM state.
2799 */
2800static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2801{
2802 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2803 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2804 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2805 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2806 else
2807 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2808}
2809
2810
2811/*
2812 *
2813 * Common opcode decoders.
2814 * Common opcode decoders.
2815 * Common opcode decoders.
2816 *
2817 */
2818//#include <iprt/mem.h>
2819
2820/**
2821 * Used to add extra details about a stub case.
2822 * @param pIemCpu The IEM per CPU state.
2823 */
2824static void iemOpStubMsg2(PIEMCPU pIemCpu)
2825{
2826#if defined(LOG_ENABLED) && defined(IN_RING3)
2827 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2828 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2829 char szRegs[4096];
2830 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2831 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2832 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2833 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2834 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2835 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2836 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2837 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2838 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2839 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2840 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2841 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2842 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2843 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2844 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2845 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2846 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2847 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2848 " efer=%016VR{efer}\n"
2849 " pat=%016VR{pat}\n"
2850 " sf_mask=%016VR{sf_mask}\n"
2851 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2852 " lstar=%016VR{lstar}\n"
2853 " star=%016VR{star} cstar=%016VR{cstar}\n"
2854 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2855 );
2856
2857 char szInstr[256];
2858 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2859 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2860 szInstr, sizeof(szInstr), NULL);
2861
2862 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2863#else
2864 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
2865#endif
2866}
2867
2868/**
2869 * Complains about a stub.
2870 *
2871 * Providing two versions of this macro, one for daily use and one for use when
2872 * working on IEM.
2873 */
2874#if 0
2875# define IEMOP_BITCH_ABOUT_STUB() \
2876 do { \
2877 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2878 iemOpStubMsg2(pIemCpu); \
2879 RTAssertPanic(); \
2880 } while (0)
2881#else
2882# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
2883#endif
2884
2885/** Stubs an opcode. */
2886#define FNIEMOP_STUB(a_Name) \
2887 FNIEMOP_DEF(a_Name) \
2888 { \
2889 IEMOP_BITCH_ABOUT_STUB(); \
2890 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2891 } \
2892 typedef int ignore_semicolon
2893
2894/** Stubs an opcode. */
2895#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2896 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2897 { \
2898 IEMOP_BITCH_ABOUT_STUB(); \
2899 NOREF(a_Name0); \
2900 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2901 } \
2902 typedef int ignore_semicolon
2903
2904/** Stubs an opcode which currently should raise \#UD. */
2905#define FNIEMOP_UD_STUB(a_Name) \
2906 FNIEMOP_DEF(a_Name) \
2907 { \
2908 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2909 return IEMOP_RAISE_INVALID_OPCODE(); \
2910 } \
2911 typedef int ignore_semicolon
2912
2913/** Stubs an opcode which currently should raise \#UD. */
2914#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
2915 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2916 { \
2917 NOREF(a_Name0); \
2918 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2919 return IEMOP_RAISE_INVALID_OPCODE(); \
2920 } \
2921 typedef int ignore_semicolon
2922
2923
2924
2925/** @name Register Access.
2926 * @{
2927 */
2928
2929/**
2930 * Gets a reference (pointer) to the specified hidden segment register.
2931 *
2932 * @returns Hidden register reference.
2933 * @param pIemCpu The per CPU data.
2934 * @param iSegReg The segment register.
2935 */
2936static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2937{
2938 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2939 PCPUMSELREG pSReg;
2940 switch (iSegReg)
2941 {
2942 case X86_SREG_ES: pSReg = &pCtx->es; break;
2943 case X86_SREG_CS: pSReg = &pCtx->cs; break;
2944 case X86_SREG_SS: pSReg = &pCtx->ss; break;
2945 case X86_SREG_DS: pSReg = &pCtx->ds; break;
2946 case X86_SREG_FS: pSReg = &pCtx->fs; break;
2947 case X86_SREG_GS: pSReg = &pCtx->gs; break;
2948 default:
2949 AssertFailedReturn(NULL);
2950 }
2951#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2952 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
2953 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
2954#else
2955 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2956#endif
2957 return pSReg;
2958}
2959
2960
2961/**
2962 * Gets a reference (pointer) to the specified segment register (the selector
2963 * value).
2964 *
2965 * @returns Pointer to the selector variable.
2966 * @param pIemCpu The per CPU data.
2967 * @param iSegReg The segment register.
2968 */
2969static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2970{
2971 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2972 switch (iSegReg)
2973 {
2974 case X86_SREG_ES: return &pCtx->es.Sel;
2975 case X86_SREG_CS: return &pCtx->cs.Sel;
2976 case X86_SREG_SS: return &pCtx->ss.Sel;
2977 case X86_SREG_DS: return &pCtx->ds.Sel;
2978 case X86_SREG_FS: return &pCtx->fs.Sel;
2979 case X86_SREG_GS: return &pCtx->gs.Sel;
2980 }
2981 AssertFailedReturn(NULL);
2982}
2983
2984
2985/**
2986 * Fetches the selector value of a segment register.
2987 *
2988 * @returns The selector value.
2989 * @param pIemCpu The per CPU data.
2990 * @param iSegReg The segment register.
2991 */
2992static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
2993{
2994 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2995 switch (iSegReg)
2996 {
2997 case X86_SREG_ES: return pCtx->es.Sel;
2998 case X86_SREG_CS: return pCtx->cs.Sel;
2999 case X86_SREG_SS: return pCtx->ss.Sel;
3000 case X86_SREG_DS: return pCtx->ds.Sel;
3001 case X86_SREG_FS: return pCtx->fs.Sel;
3002 case X86_SREG_GS: return pCtx->gs.Sel;
3003 }
3004 AssertFailedReturn(0xffff);
3005}
3006
3007
3008/**
3009 * Gets a reference (pointer) to the specified general register.
3010 *
3011 * @returns Register reference.
3012 * @param pIemCpu The per CPU data.
3013 * @param iReg The general register.
3014 */
3015static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
3016{
3017 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3018 switch (iReg)
3019 {
3020 case X86_GREG_xAX: return &pCtx->rax;
3021 case X86_GREG_xCX: return &pCtx->rcx;
3022 case X86_GREG_xDX: return &pCtx->rdx;
3023 case X86_GREG_xBX: return &pCtx->rbx;
3024 case X86_GREG_xSP: return &pCtx->rsp;
3025 case X86_GREG_xBP: return &pCtx->rbp;
3026 case X86_GREG_xSI: return &pCtx->rsi;
3027 case X86_GREG_xDI: return &pCtx->rdi;
3028 case X86_GREG_x8: return &pCtx->r8;
3029 case X86_GREG_x9: return &pCtx->r9;
3030 case X86_GREG_x10: return &pCtx->r10;
3031 case X86_GREG_x11: return &pCtx->r11;
3032 case X86_GREG_x12: return &pCtx->r12;
3033 case X86_GREG_x13: return &pCtx->r13;
3034 case X86_GREG_x14: return &pCtx->r14;
3035 case X86_GREG_x15: return &pCtx->r15;
3036 }
3037 AssertFailedReturn(NULL);
3038}
3039
3040
3041/**
3042 * Gets a reference (pointer) to the specified 8-bit general register.
3043 *
3044 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
3045 *
3046 * @returns Register reference.
3047 * @param pIemCpu The per CPU data.
3048 * @param iReg The register.
3049 */
3050static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
3051{
3052 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
3053 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
3054
3055 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
3056 if (iReg >= 4)
3057 pu8Reg++;
3058 return pu8Reg;
3059}
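
/* Informal example: without a REX prefix, encodings 4..7 select AH/CH/DH/BH,
   so iReg=4 returns a pointer to the high byte of RAX (register 0 plus one
   byte); with any REX prefix the same encoding selects SPL, the low byte of
   RSP. */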
3060
3061
3062/**
3063 * Fetches the value of an 8-bit general register.
3064 *
3065 * @returns The register value.
3066 * @param pIemCpu The per CPU data.
3067 * @param iReg The register.
3068 */
3069static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
3070{
3071 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
3072 return *pbSrc;
3073}
3074
3075
3076/**
3077 * Fetches the value of a 16-bit general register.
3078 *
3079 * @returns The register value.
3080 * @param pIemCpu The per CPU data.
3081 * @param iReg The register.
3082 */
3083static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
3084{
3085 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
3086}
3087
3088
3089/**
3090 * Fetches the value of a 32-bit general register.
3091 *
3092 * @returns The register value.
3093 * @param pIemCpu The per CPU data.
3094 * @param iReg The register.
3095 */
3096static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
3097{
3098 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
3099}
3100
3101
3102/**
3103 * Fetches the value of a 64-bit general register.
3104 *
3105 * @returns The register value.
3106 * @param pIemCpu The per CPU data.
3107 * @param iReg The register.
3108 */
3109static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
3110{
3111 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
3112}
3113
3114
3115/**
3116 * Checks whether the FPU state is in FXSAVE format or not.
3117 *
3118 * @returns true if it is, false if it's in FNSAVE format.
3119 * @param pIemCpu The IEM per CPU data.
3120 */
3121DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
3122{
3123#ifdef RT_ARCH_AMD64
3124 NOREF(pIemCpu);
3125 return true;
3126#else
3127 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
3128 return true;
3129#endif
3130}
3131
3132
3133/**
3134 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
3135 *
3136 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3137 * segment limit.
3138 *
3139 * @param pIemCpu The per CPU data.
3140 * @param offNextInstr The offset of the next instruction.
3141 */
3142static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
3143{
3144 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3145 switch (pIemCpu->enmEffOpSize)
3146 {
3147 case IEMMODE_16BIT:
3148 {
3149 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3150 if ( uNewIp > pCtx->cs.u32Limit
3151 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3152 return iemRaiseGeneralProtectionFault0(pIemCpu);
3153 pCtx->rip = uNewIp;
3154 break;
3155 }
3156
3157 case IEMMODE_32BIT:
3158 {
3159 Assert(pCtx->rip <= UINT32_MAX);
3160 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3161
3162 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3163 if (uNewEip > pCtx->cs.u32Limit)
3164 return iemRaiseGeneralProtectionFault0(pIemCpu);
3165 pCtx->rip = uNewEip;
3166 break;
3167 }
3168
3169 case IEMMODE_64BIT:
3170 {
3171 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3172
3173 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3174 if (!IEM_IS_CANONICAL(uNewRip))
3175 return iemRaiseGeneralProtectionFault0(pIemCpu);
3176 pCtx->rip = uNewRip;
3177 break;
3178 }
3179
3180 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3181 }
3182
3183 return VINF_SUCCESS;
3184}
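
/* Worked example (informal): offNextInstr is relative to the instruction that
   follows the jump, so a 2-byte JMP short at IP=0x1000 with rel8=0x10 gives
   uNewIp = 0x1000 + 0x10 + 2 = 0x1012 in the 16-bit case, which is then checked
   against CS.limit before being committed to RIP. */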
3185
3186
3187/**
3188 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
3189 *
3190 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3191 * segment limit.
3192 *
3193 * @returns Strict VBox status code.
3194 * @param pIemCpu The per CPU data.
3195 * @param offNextInstr The offset of the next instruction.
3196 */
3197static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
3198{
3199 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3200 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
3201
3202 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3203 if ( uNewIp > pCtx->cs.u32Limit
3204 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3205 return iemRaiseGeneralProtectionFault0(pIemCpu);
3206 /** @todo Test 16-bit jump in 64-bit mode. */
3207 pCtx->rip = uNewIp;
3208
3209 return VINF_SUCCESS;
3210}
3211
3212
3213/**
3214 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
3215 *
3216 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3217 * segment limit.
3218 *
3219 * @returns Strict VBox status code.
3220 * @param pIemCpu The per CPU data.
3221 * @param offNextInstr The offset of the next instruction.
3222 */
3223static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
3224{
3225 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3226 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
3227
3228 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
3229 {
3230 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3231
3232 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3233 if (uNewEip > pCtx->cs.u32Limit)
3234 return iemRaiseGeneralProtectionFault0(pIemCpu);
3235 pCtx->rip = uNewEip;
3236 }
3237 else
3238 {
3239 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3240
3241 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3242 if (!IEM_IS_CANONICAL(uNewRip))
3243 return iemRaiseGeneralProtectionFault0(pIemCpu);
3244 pCtx->rip = uNewRip;
3245 }
3246 return VINF_SUCCESS;
3247}
3248
3249
3250/**
3251 * Performs a near jump to the specified address.
3252 *
3253 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3254 * segment limit.
3255 *
3256 * @param pIemCpu The per CPU data.
3257 * @param uNewRip The new RIP value.
3258 */
3259static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
3260{
3261 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3262 switch (pIemCpu->enmEffOpSize)
3263 {
3264 case IEMMODE_16BIT:
3265 {
3266 Assert(uNewRip <= UINT16_MAX);
3267 if ( uNewRip > pCtx->cs.u32Limit
3268 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3269 return iemRaiseGeneralProtectionFault0(pIemCpu);
3270 /** @todo Test 16-bit jump in 64-bit mode. */
3271 pCtx->rip = uNewRip;
3272 break;
3273 }
3274
3275 case IEMMODE_32BIT:
3276 {
3277 Assert(uNewRip <= UINT32_MAX);
3278 Assert(pCtx->rip <= UINT32_MAX);
3279 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3280
3281 if (uNewRip > pCtx->cs.u32Limit)
3282 return iemRaiseGeneralProtectionFault0(pIemCpu);
3283 pCtx->rip = uNewRip;
3284 break;
3285 }
3286
3287 case IEMMODE_64BIT:
3288 {
3289 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3290
3291 if (!IEM_IS_CANONICAL(uNewRip))
3292 return iemRaiseGeneralProtectionFault0(pIemCpu);
3293 pCtx->rip = uNewRip;
3294 break;
3295 }
3296
3297 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3298 }
3299
3300 return VINF_SUCCESS;
3301}
3302
3303
3304/**
3305 * Get the address of the top of the stack.
3306 *
3307 * @param pCtx The CPU context which SP/ESP/RSP should be
3308 * read.
3309 */
3310DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
3311{
3312 if (pCtx->ss.Attr.n.u1Long)
3313 return pCtx->rsp;
3314 if (pCtx->ss.Attr.n.u1DefBig)
3315 return pCtx->esp;
3316 return pCtx->sp;
3317}
3318
3319
3320/**
3321 * Updates the RIP/EIP/IP to point to the next instruction.
3322 *
3323 * @param pIemCpu The per CPU data.
3324 * @param cbInstr The number of bytes to add.
3325 */
3326static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
3327{
3328 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3329 switch (pIemCpu->enmCpuMode)
3330 {
3331 case IEMMODE_16BIT:
3332 Assert(pCtx->rip <= UINT16_MAX);
3333 pCtx->eip += cbInstr;
3334 pCtx->eip &= UINT32_C(0xffff);
3335 break;
3336
3337 case IEMMODE_32BIT:
3338 pCtx->eip += cbInstr;
3339 Assert(pCtx->rip <= UINT32_MAX);
3340 break;
3341
3342 case IEMMODE_64BIT:
3343 pCtx->rip += cbInstr;
3344 break;
3345 default: AssertFailed();
3346 }
3347}
3348
3349
3350/**
3351 * Updates the RIP/EIP/IP to point to the next instruction.
3352 *
3353 * @param pIemCpu The per CPU data.
3354 */
3355static void iemRegUpdateRip(PIEMCPU pIemCpu)
3356{
3357 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3358}
3359
3360
3361/**
3362 * Adds to the stack pointer.
3363 *
3364 * @param pCtx The CPU context whose SP/ESP/RSP should be
3365 * updated.
3366 * @param cbToAdd The number of bytes to add.
3367 */
3368DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
3369{
3370 if (pCtx->ss.Attr.n.u1Long)
3371 pCtx->rsp += cbToAdd;
3372 else if (pCtx->ss.Attr.n.u1DefBig)
3373 pCtx->esp += cbToAdd;
3374 else
3375 pCtx->sp += cbToAdd;
3376}
3377
3378
3379/**
3380 * Subtracts from the stack pointer.
3381 *
3382 * @param pCtx The CPU context whose SP/ESP/RSP should be
3383 * updated.
3384 * @param cbToSub The number of bytes to subtract.
3385 */
3386DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
3387{
3388 if (pCtx->ss.Attr.n.u1Long)
3389 pCtx->rsp -= cbToSub;
3390 else if (pCtx->ss.Attr.n.u1DefBig)
3391 pCtx->esp -= cbToSub;
3392 else
3393 pCtx->sp -= cbToSub;
3394}
3395
3396
3397/**
3398 * Adds to the temporary stack pointer.
3399 *
3400 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3401 * @param cbToAdd The number of bytes to add.
3402 * @param pCtx Where to get the current stack mode.
3403 */
3404DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint16_t cbToAdd, PCCPUMCTX pCtx)
3405{
3406 if (pCtx->ss.Attr.n.u1Long)
3407 pTmpRsp->u += cbToAdd;
3408 else if (pCtx->ss.Attr.n.u1DefBig)
3409 pTmpRsp->DWords.dw0 += cbToAdd;
3410 else
3411 pTmpRsp->Words.w0 += cbToAdd;
3412}
3413
3414
3415/**
3416 * Subtracts from the temporary stack pointer.
3417 *
3418 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3419 * @param cbToSub The number of bytes to subtract.
3420 * @param pCtx Where to get the current stack mode.
3421 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3422 * expecting that.
3423 */
3424DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint16_t cbToSub, PCCPUMCTX pCtx)
3425{
3426 if (pCtx->ss.Attr.n.u1Long)
3427 pTmpRsp->u -= cbToSub;
3428 else if (pCtx->ss.Attr.n.u1DefBig)
3429 pTmpRsp->DWords.dw0 -= cbToSub;
3430 else
3431 pTmpRsp->Words.w0 -= cbToSub;
3432}
3433
3434
3435/**
3436 * Calculates the effective stack address for a push of the specified size as
3437 * well as the new RSP value (upper bits may be masked).
3438 *
3439 * @returns Effective stack address for the push.
3440 * @param pCtx Where to get the current stack mode.
3441 * @param cbItem The size of the stack item to push.
3442 * @param puNewRsp Where to return the new RSP value.
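 *
 * @remarks For illustration: with a 16-bit stack segment and RSP=0x00120002,
 *          pushing a 4 byte item returns 0xFFFE and sets *puNewRsp to
 *          0x0012FFFE; only the addressed 16 bits of RSP wrap around, the
 *          upper bits are left untouched.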
3443 */
3444DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3445{
3446 RTUINT64U uTmpRsp;
3447 RTGCPTR GCPtrTop;
3448 uTmpRsp.u = pCtx->rsp;
3449
3450 if (pCtx->ss.Attr.n.u1Long)
3451 GCPtrTop = uTmpRsp.u -= cbItem;
3452 else if (pCtx->ss.Attr.n.u1DefBig)
3453 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3454 else
3455 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3456 *puNewRsp = uTmpRsp.u;
3457 return GCPtrTop;
3458}
3459
3460
3461/**
3462 * Gets the current stack pointer and calculates the value after a pop of the
3463 * specified size.
3464 *
3465 * @returns Current stack pointer.
3466 * @param pCtx Where to get the current stack mode.
3467 * @param cbItem The size of the stack item to pop.
3468 * @param puNewRsp Where to return the new RSP value.
3469 */
3470DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3471{
3472 RTUINT64U uTmpRsp;
3473 RTGCPTR GCPtrTop;
3474 uTmpRsp.u = pCtx->rsp;
3475
3476 if (pCtx->ss.Attr.n.u1Long)
3477 {
3478 GCPtrTop = uTmpRsp.u;
3479 uTmpRsp.u += cbItem;
3480 }
3481 else if (pCtx->ss.Attr.n.u1DefBig)
3482 {
3483 GCPtrTop = uTmpRsp.DWords.dw0;
3484 uTmpRsp.DWords.dw0 += cbItem;
3485 }
3486 else
3487 {
3488 GCPtrTop = uTmpRsp.Words.w0;
3489 uTmpRsp.Words.w0 += cbItem;
3490 }
3491 *puNewRsp = uTmpRsp.u;
3492 return GCPtrTop;
3493}
3494
3495
3496/**
3497 * Calculates the effective stack address for a push of the specified size as
3498 * well as the new temporary RSP value (upper bits may be masked).
3499 *
3500 * @returns Effective stack address for the push.
3501 * @param pTmpRsp The temporary stack pointer. This is updated.
3502 * @param cbItem The size of the stack item to push.
3503 * @param pCtx Where to get the current stack mode.
3504 */
3505DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3506{
3507 RTGCPTR GCPtrTop;
3508
3509 if (pCtx->ss.Attr.n.u1Long)
3510 GCPtrTop = pTmpRsp->u -= cbItem;
3511 else if (pCtx->ss.Attr.n.u1DefBig)
3512 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3513 else
3514 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3515 return GCPtrTop;
3516}
3517
3518
3519/**
3520 * Gets the effective stack address for a pop of the specified size and
3521 * calculates and updates the temporary RSP.
3522 *
3523 * @returns Current stack pointer.
3524 * @param pTmpRsp The temporary stack pointer. This is updated.
3525 * @param pCtx Where to get the current stack mode.
3526 * @param cbItem The size of the stack item to pop.
3527 */
3528DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3529{
3530 RTGCPTR GCPtrTop;
3531 if (pCtx->ss.Attr.n.u1Long)
3532 {
3533 GCPtrTop = pTmpRsp->u;
3534 pTmpRsp->u += cbItem;
3535 }
3536 else if (pCtx->ss.Attr.n.u1DefBig)
3537 {
3538 GCPtrTop = pTmpRsp->DWords.dw0;
3539 pTmpRsp->DWords.dw0 += cbItem;
3540 }
3541 else
3542 {
3543 GCPtrTop = pTmpRsp->Words.w0;
3544 pTmpRsp->Words.w0 += cbItem;
3545 }
3546 return GCPtrTop;
3547}
3548
3549
3550/**
3551 * Checks if an Intel CPUID feature bit is set.
3552 *
3553 * @returns true / false.
3554 *
3555 * @param pIemCpu The IEM per CPU data.
3556 * @param fEdx The EDX bit to test, or 0 if ECX.
3557 * @param fEcx The ECX bit to test, or 0 if EDX.
3558 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3559 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
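 *          A typical call site would look something like
 *          IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2),
 *          i.e. the standard leaf 0x00000001 feature bit of interest goes in
 *          fEdx or fEcx and the other argument is zero.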
3560 */
3561static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3562{
3563 uint32_t uEax, uEbx, uEcx, uEdx;
3564 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3565 return (fEcx && (uEcx & fEcx))
3566 || (fEdx && (uEdx & fEdx));
3567}
3568
3569
3570/**
3571 * Checks if an AMD CPUID feature bit is set.
3572 *
3573 * @returns true / false.
3574 *
3575 * @param pIemCpu The IEM per CPU data.
3576 * @param fEdx The EDX bit to test, or 0 if ECX.
3577 * @param fEcx The ECX bit to test, or 0 if EDX.
3578 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3579 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3580 */
3581static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3582{
3583 uint32_t uEax, uEbx, uEcx, uEdx;
3584 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3585 return (fEcx && (uEcx & fEcx))
3586 || (fEdx && (uEdx & fEdx));
3587}
3588
3589/** @} */
3590
3591
3592/** @name FPU access and helpers.
3593 *
3594 * @{
3595 */
3596
3597
3598/**
3599 * Hook for preparing to use the host FPU.
3600 *
3601 * This is necessary in ring-0 and raw-mode context.
3602 *
3603 * @param pIemCpu The IEM per CPU data.
3604 */
3605DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3606{
3607#ifdef IN_RING3
3608 NOREF(pIemCpu);
3609#else
3610/** @todo RZ: FIXME */
3611//# error "Implement me"
3612#endif
3613}
3614
3615
3616/**
3617 * Stores a QNaN value into a FPU register.
3618 *
3619 * @param pReg Pointer to the register.
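 * @remarks The value written is the 80-bit "real indefinite" QNaN,
 *          0xFFFF:C000000000000000 (sign set, exponent all ones, integer bit
 *          and top fraction bit set).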
3620 */
3621DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3622{
3623 pReg->au32[0] = UINT32_C(0x00000000);
3624 pReg->au32[1] = UINT32_C(0xc0000000);
3625 pReg->au16[4] = UINT16_C(0xffff);
3626}
3627
3628
3629/**
3630 * Updates the FOP, FPU.CS and FPUIP registers.
3631 *
3632 * @param pIemCpu The IEM per CPU data.
3633 * @param pCtx The CPU context.
3634 */
3635DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3636{
3637 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3638 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3639 /** @todo FPU.CS and FPUIP need to be kept separately. */
3640 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3641 {
3642 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
3643 * handled in real mode, based on the fnsave and fnstenv images. */
3644 pCtx->fpu.CS = 0;
3645 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
3646 }
3647 else
3648 {
3649 pCtx->fpu.CS = pCtx->cs.Sel;
3650 pCtx->fpu.FPUIP = pCtx->rip;
3651 }
3652}
3653
3654
3655/**
3656 * Updates the FPU.DS and FPUDP registers.
3657 *
3658 * @param pIemCpu The IEM per CPU data.
3659 * @param pCtx The CPU context.
3660 * @param iEffSeg The effective segment register.
3661 * @param GCPtrEff The effective address relative to @a iEffSeg.
3662 */
3663DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3664{
3665 RTSEL sel;
3666 switch (iEffSeg)
3667 {
3668 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
3669 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
3670 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
3671 case X86_SREG_ES: sel = pCtx->es.Sel; break;
3672 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
3673 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
3674 default:
3675 AssertMsgFailed(("%d\n", iEffSeg));
3676 sel = pCtx->ds.Sel;
3677 }
3678 /** @todo FPU.DS and FPUDP need to be kept separately. */
3679 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3680 {
3681 pCtx->fpu.DS = 0;
3682 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
3683 }
3684 else
3685 {
3686 pCtx->fpu.DS = sel;
3687 pCtx->fpu.FPUDP = GCPtrEff;
3688 }
3689}
3690
3691
3692/**
3693 * Rotates the stack registers in the push direction.
3694 *
3695 * @param pCtx The CPU context.
3696 * @remarks This is a complete waste of time, but fxsave stores the registers in
3697 * stack order.
3698 */
3699DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
3700{
3701 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
3702 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
3703 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
3704 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
3705 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
3706 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
3707 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
3708 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
3709 pCtx->fpu.aRegs[0].r80 = r80Tmp;
3710}
3711
3712
3713/**
3714 * Rotates the stack registers in the pop direction.
3715 *
3716 * @param pCtx The CPU context.
3717 * @remarks This is a complete waste of time, but fxsave stores the registers in
3718 * stack order.
3719 */
3720DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
3721{
3722 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
3723 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
3724 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
3725 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
3726 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
3727 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
3728 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
3729 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
3730 pCtx->fpu.aRegs[7].r80 = r80Tmp;
3731}
3732
3733
3734/**
3735 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
3736 * exception prevents it.
3737 *
3738 * @param pIemCpu The IEM per CPU data.
3739 * @param pResult The FPU operation result to push.
3740 * @param pCtx The CPU context.
3741 */
3742static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
3743{
3744 /* Update FSW and bail if there are pending exceptions afterwards. */
3745 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3746 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3747 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3748 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3749 {
3750 pCtx->fpu.FSW = fFsw;
3751 return;
3752 }
3753
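    /* New TOP is TOP - 1: adding 7 to the 3-bit TOP field is the same as
       subtracting 1 modulo 8 (a push decrements TOP). */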
3754 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3755 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3756 {
3757 /* All is fine, push the actual value. */
3758 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3759 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3760 }
3761 else if (pCtx->fpu.FCW & X86_FCW_IM)
3762 {
3763 /* Masked stack overflow, push QNaN. */
3764 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3765 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3766 }
3767 else
3768 {
3769 /* Raise stack overflow, don't push anything. */
3770 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3771 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3772 return;
3773 }
3774
3775 fFsw &= ~X86_FSW_TOP_MASK;
3776 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3777 pCtx->fpu.FSW = fFsw;
3778
3779 iemFpuRotateStackPush(pCtx);
3780}
3781
3782
3783/**
3784 * Stores a result in a FPU register and updates the FSW and FTW.
3785 *
3786 * @param pIemCpu The IEM per CPU data.
3787 * @param pResult The result to store.
3788 * @param iStReg Which FPU register to store it in.
3789 * @param pCtx The CPU context.
3790 */
3791static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
3792{
3793 Assert(iStReg < 8);
3794 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3795 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3796 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
3797 pCtx->fpu.FTW |= RT_BIT(iReg);
3798 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
3799}
3800
3801
3802/**
3803 * Only updates the FPU status word (FSW) with the result of the current
3804 * instruction.
3805 *
3806 * @param pCtx The CPU context.
3807 * @param u16FSW The FSW output of the current instruction.
3808 */
3809static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
3810{
3811 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3812 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
3813}
3814
3815
3816/**
3817 * Pops one item off the FPU stack if no pending exception prevents it.
3818 *
3819 * @param pCtx The CPU context.
3820 */
3821static void iemFpuMaybePopOne(PCPUMCTX pCtx)
3822{
3823 /* Check pending exceptions. */
3824 uint16_t uFSW = pCtx->fpu.FSW;
3825 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3826 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3827 return;
3828
3829 /* TOP++ - a pop increments TOP; adding 9 is +1 modulo 8 in the 3-bit field. */
3830 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
3831 uFSW &= ~X86_FSW_TOP_MASK;
3832 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3833 pCtx->fpu.FSW = uFSW;
3834
3835 /* Mark the previous ST0 as empty. */
3836 iOldTop >>= X86_FSW_TOP_SHIFT;
3837 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
3838
3839 /* Rotate the registers. */
3840 iemFpuRotateStackPop(pCtx);
3841}
3842
3843
3844/**
3845 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
3846 *
3847 * @param pIemCpu The IEM per CPU data.
3848 * @param pResult The FPU operation result to push.
3849 */
3850static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
3851{
3852 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3853 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3854 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3855}
3856
3857
3858/**
3859 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
3860 * and sets FPUDP and FPUDS.
3861 *
3862 * @param pIemCpu The IEM per CPU data.
3863 * @param pResult The FPU operation result to push.
3864 * @param iEffSeg The effective segment register.
3865 * @param GCPtrEff The effective address relative to @a iEffSeg.
3866 */
3867static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3868{
3869 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3870 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3871 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3872 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3873}
3874
3875
3876/**
3877 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
3878 * unless a pending exception prevents it.
3879 *
3880 * @param pIemCpu The IEM per CPU data.
3881 * @param pResult The FPU operation result to store and push.
3882 */
3883static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
3884{
3885 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3886 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3887
3888 /* Update FSW and bail if there are pending exceptions afterwards. */
3889 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3890 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3891 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3892 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3893 {
3894 pCtx->fpu.FSW = fFsw;
3895 return;
3896 }
3897
3898 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3899 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3900 {
3901 /* All is fine, push the actual value. */
3902 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3903 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
3904 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
3905 }
3906 else if (pCtx->fpu.FCW & X86_FCW_IM)
3907 {
3908 /* Masked stack overflow, push QNaN. */
3909 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3910 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
3911 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3912 }
3913 else
3914 {
3915 /* Raise stack overflow, don't push anything. */
3916 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3917 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3918 return;
3919 }
3920
3921 fFsw &= ~X86_FSW_TOP_MASK;
3922 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3923 pCtx->fpu.FSW = fFsw;
3924
3925 iemFpuRotateStackPush(pCtx);
3926}
3927
3928
3929/**
3930 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3931 * FOP.
3932 *
3933 * @param pIemCpu The IEM per CPU data.
3934 * @param pResult The result to store.
3935 * @param iStReg Which FPU register to store it in.
3937 */
3938static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3939{
3940 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3941 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3942 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3943}
3944
3945
3946/**
3947 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3948 * FOP, and then pops the stack.
3949 *
3950 * @param pIemCpu The IEM per CPU data.
3951 * @param pResult The result to store.
3952 * @param iStReg Which FPU register to store it in.
3954 */
3955static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3956{
3957 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3958 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3959 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3960 iemFpuMaybePopOne(pCtx);
3961}
3962
3963
3964/**
3965 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3966 * FPUDP, and FPUDS.
3967 *
3968 * @param pIemCpu The IEM per CPU data.
3969 * @param pResult The result to store.
3970 * @param iStReg Which FPU register to store it in.
3972 * @param iEffSeg The effective memory operand selector register.
3973 * @param GCPtrEff The effective memory operand offset.
3974 */
3975static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3976{
3977 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3978 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
3979 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3980 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3981}
3982
3983
3984/**
3985 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3986 * FPUDP, and FPUDS, and then pops the stack.
3987 *
3988 * @param pIemCpu The IEM per CPU data.
3989 * @param pResult The result to store.
3990 * @param iStReg Which FPU register to store it in.
3992 * @param iEffSeg The effective memory operand selector register.
3993 * @param GCPtrEff The effective memory operand offset.
3994 */
3995static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
3996 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3997{
3998 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3999 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4000 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4001 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4002 iemFpuMaybePopOne(pCtx);
4003}
4004
4005
4006/**
4007 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4008 *
4009 * @param pIemCpu The IEM per CPU data.
4010 */
4011static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
4012{
4013 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
4014}
4015
4016
4017/**
4018 * Marks the specified stack register as free (for FFREE).
4019 *
4020 * @param pIemCpu The IEM per CPU data.
4021 * @param iStReg The register to free.
4022 */
4023static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
4024{
4025 Assert(iStReg < 8);
4026 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4027 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4028 pCtx->fpu.FTW &= ~RT_BIT(iReg);
4029}
4030
4031
4032/**
4033 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
4034 *
4035 * @param pIemCpu The IEM per CPU data.
4036 */
4037static void iemFpuStackIncTop(PIEMCPU pIemCpu)
4038{
4039 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4040 uint16_t uFsw = pCtx->fpu.FSW;
4041 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4042 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4043 uFsw &= ~X86_FSW_TOP_MASK;
4044 uFsw |= uTop;
4045 pCtx->fpu.FSW = uFsw;
4046}
4047
4048
4049/**
4050 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
4051 *
4052 * @param pIemCpu The IEM per CPU data.
4053 */
4054static void iemFpuStackDecTop(PIEMCPU pIemCpu)
4055{
4056 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4057 uint16_t uFsw = pCtx->fpu.FSW;
4058 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4059 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4060 uFsw &= ~X86_FSW_TOP_MASK;
4061 uFsw |= uTop;
4062 pCtx->fpu.FSW = uFsw;
4063}
4064
4065
4066/**
4067 * Updates the FSW, FOP, FPUIP, and FPUCS.
4068 *
4069 * @param pIemCpu The IEM per CPU data.
4070 * @param u16FSW The FSW from the current instruction.
4071 */
4072static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
4073{
4074 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4075 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4076 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4077}
4078
4079
4080/**
4081 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4082 *
4083 * @param pIemCpu The IEM per CPU data.
4084 * @param u16FSW The FSW from the current instruction.
4085 */
4086static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4087{
4088 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4089 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4090 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4091 iemFpuMaybePopOne(pCtx);
4092}
4093
4094
4095/**
4096 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4097 *
4098 * @param pIemCpu The IEM per CPU data.
4099 * @param u16FSW The FSW from the current instruction.
4100 * @param iEffSeg The effective memory operand selector register.
4101 * @param GCPtrEff The effective memory operand offset.
4102 */
4103static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4104{
4105 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4106 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4107 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4108 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4109}
4110
4111
4112/**
4113 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4114 *
4115 * @param pIemCpu The IEM per CPU data.
4116 * @param u16FSW The FSW from the current instruction.
4117 */
4118static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4119{
4120 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4121 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4122 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4123 iemFpuMaybePopOne(pCtx);
4124 iemFpuMaybePopOne(pCtx);
4125}
4126
4127
4128/**
4129 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4130 *
4131 * @param pIemCpu The IEM per CPU data.
4132 * @param u16FSW The FSW from the current instruction.
4133 * @param iEffSeg The effective memory operand selector register.
4134 * @param GCPtrEff The effective memory operand offset.
4135 */
4136static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4137{
4138 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4139 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4140 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4141 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4142 iemFpuMaybePopOne(pCtx);
4143}
4144
4145
4146/**
4147 * Worker routine for raising an FPU stack underflow exception.
4148 *
4149 * @param pIemCpu The IEM per CPU data.
4150 * @param iStReg The stack register being accessed.
4151 * @param pCtx The CPU context.
4152 */
4153static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
4154{
4155 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4156 if (pCtx->fpu.FCW & X86_FCW_IM)
4157 {
4158 /* Masked underflow. */
4159 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4160 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4161 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4162 if (iStReg != UINT8_MAX)
4163 {
4164 pCtx->fpu.FTW |= RT_BIT(iReg);
4165 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4166 }
4167 }
4168 else
4169 {
4170 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4171 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4172 }
4173}
4174
4175
4176/**
4177 * Raises a FPU stack underflow exception.
4178 *
4179 * @param pIemCpu The IEM per CPU data.
4180 * @param iStReg The destination register that should be loaded
4181 * with QNaN if \#IS is masked. Specify
4182 * UINT8_MAX if none (like for fcom).
4183 */
4184DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
4185{
4186 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4187 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4188 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4189}
4190
4191
4192DECL_NO_INLINE(static, void)
4193iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4194{
4195 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4196 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4197 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4198 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4199}
4200
4201
4202DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
4203{
4204 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4205 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4206 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4207 iemFpuMaybePopOne(pCtx);
4208}
4209
4210
4211DECL_NO_INLINE(static, void)
4212iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4213{
4214 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4215 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4216 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4217 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4218 iemFpuMaybePopOne(pCtx);
4219}
4220
4221
4222DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
4223{
4224 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4225 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4226 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
4227 iemFpuMaybePopOne(pCtx);
4228 iemFpuMaybePopOne(pCtx);
4229}
4230
4231
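/**
 * Raises a FPU stack underflow exception for an instruction that pushes a
 * result.
 *
 * If the exception is masked (FCW.IM set) a QNaN is pushed and TOP is updated;
 * otherwise only the pending exception bits are set in FSW.
 *
 * @param pIemCpu The IEM per CPU data.
 */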
4232DECL_NO_INLINE(static, void)
4233iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
4234{
4235 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4236 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4237
4238 if (pCtx->fpu.FCW & X86_FCW_IM)
4239 {
4240 /* Masked underflow - push QNaN. */
4241 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4242 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4243 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4244 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4245 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4246 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4247 iemFpuRotateStackPush(pCtx);
4248 }
4249 else
4250 {
4251 /* Exception pending - don't change TOP or the register stack. */
4252 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4253 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4254 }
4255}
4256
4257
4258DECL_NO_INLINE(static, void)
4259iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
4260{
4261 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4262 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4263
4264 if (pCtx->fpu.FCW & X86_FCW_IM)
4265 {
4266 /* Masked underflow - push QNaN. */
4267 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4268 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4269 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4270 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4271 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4272 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4273 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4274 iemFpuRotateStackPush(pCtx);
4275 }
4276 else
4277 {
4278 /* Exception pending - don't change TOP or the register stack. */
4279 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4280 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4281 }
4282}
4283
4284
4285/**
4286 * Worker routine for raising an FPU stack overflow exception on a push.
4287 *
4288 * @param pIemCpu The IEM per CPU data.
4289 * @param pCtx The CPU context.
4290 */
4291static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4292{
4293 if (pCtx->fpu.FCW & X86_FCW_IM)
4294 {
4295 /* Masked overflow. */
4296 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4297 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4298 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4299 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4300 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4301 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4302 iemFpuRotateStackPush(pCtx);
4303 }
4304 else
4305 {
4306 /* Exception pending - don't change TOP or the register stack. */
4307 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4308 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4309 }
4310}
4311
4312
4313/**
4314 * Raises a FPU stack overflow exception on a push.
4315 *
4316 * @param pIemCpu The IEM per CPU data.
4317 */
4318DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4319{
4320 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4321 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4322 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4323}
4324
4325
4326/**
4327 * Raises a FPU stack overflow exception on a push with a memory operand.
4328 *
4329 * @param pIemCpu The IEM per CPU data.
4330 * @param iEffSeg The effective memory operand selector register.
4331 * @param GCPtrEff The effective memory operand offset.
4332 */
4333DECL_NO_INLINE(static, void)
4334iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4335{
4336 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4337 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4338 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4339 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4340}
4341
4342
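/**
 * Checks whether the given FPU stack register is marked as non-empty in FTW.
 *
 * @returns VINF_SUCCESS if the register is not empty, VERR_NOT_FOUND if it is.
 * @param pIemCpu The IEM per CPU data.
 * @param iStReg The stack register (relative to TOP) to check.
 */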
4343static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4344{
4345 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4346 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4347 if (pCtx->fpu.FTW & RT_BIT(iReg))
4348 return VINF_SUCCESS;
4349 return VERR_NOT_FOUND;
4350}
4351
4352
4353static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4354{
4355 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4356 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4357 if (pCtx->fpu.FTW & RT_BIT(iReg))
4358 {
4359 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4360 return VINF_SUCCESS;
4361 }
4362 return VERR_NOT_FOUND;
4363}
4364
4365
4366static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4367 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
4368{
4369 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4370 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4371 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4372 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4373 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4374 {
4375 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4376 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
4377 return VINF_SUCCESS;
4378 }
4379 return VERR_NOT_FOUND;
4380}
4381
4382
4383static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
4384{
4385 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4386 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4387 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4388 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4389 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4390 {
4391 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4392 return VINF_SUCCESS;
4393 }
4394 return VERR_NOT_FOUND;
4395}
4396
4397
4398/**
4399 * Updates the FPU exception status after FCW is changed.
4400 *
4401 * @param pCtx The CPU context.
4402 */
4403static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
4404{
4405 uint16_t u16Fsw = pCtx->fpu.FSW;
4406 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
4407 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4408 else
4409 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4410 pCtx->fpu.FSW = u16Fsw;
4411}
4412
4413
4414/**
4415 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4416 *
4417 * @returns The full FTW.
4418 * @param pCtx The CPU state.
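 * @remarks Tag values: 00 = valid, 01 = zero, 10 = special (NaN, infinity,
 *          denormal, unnormal), 11 = empty.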
4419 */
4420static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
4421{
4422 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
4423 uint16_t u16Ftw = 0;
4424 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4425 for (unsigned iSt = 0; iSt < 8; iSt++)
4426 {
4427 unsigned const iReg = (iSt + iTop) & 7;
4428 if (!(u8Ftw & RT_BIT(iReg)))
4429 u16Ftw |= 3 << (iReg * 2); /* empty */
4430 else
4431 {
4432 uint16_t uTag;
4433 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
4434 if (pr80Reg->s.uExponent == 0x7fff)
4435 uTag = 2; /* Exponent is all 1's => Special. */
4436 else if (pr80Reg->s.uExponent == 0x0000)
4437 {
4438 if (pr80Reg->s.u64Mantissa == 0x0000)
4439 uTag = 1; /* All bits are zero => Zero. */
4440 else
4441 uTag = 2; /* Must be special. */
4442 }
4443 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
4444 uTag = 0; /* Valid. */
4445 else
4446 uTag = 2; /* Must be special. */
4447
4448 u16Ftw |= uTag << (iReg * 2);
4449 }
4450 }
4451
4452 return u16Ftw;
4453}
4454
4455
4456/**
4457 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4458 *
4459 * @returns The compressed FTW.
4460 * @param u16FullFtw The full FTW to convert.
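 *
 * @remarks For illustration: a full FTW of 0xC000 (registers 0..6 tagged
 *          valid, register 7 tagged empty) compresses to 0x7F; the compressed
 *          form has one bit per register which is set when the register is
 *          not empty.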
4461 */
4462static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
4463{
4464 uint8_t u8Ftw = 0;
4465 for (unsigned i = 0; i < 8; i++)
4466 {
4467 if ((u16FullFtw & 3) != 3 /*empty*/)
4468 u8Ftw |= RT_BIT(i);
4469 u16FullFtw >>= 2;
4470 }
4471
4472 return u8Ftw;
4473}
4474
4475/** @} */
4476
4477
4478/** @name Memory access.
4479 *
4480 * @{
4481 */
4482
4483
4484/**
4485 * Updates the IEMCPU::cbWritten counter if applicable.
4486 *
4487 * @param pIemCpu The IEM per CPU data.
4488 * @param fAccess The access being accounted for.
4489 * @param cbMem The access size.
4490 */
4491DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
4492{
4493 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
4494 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
4495 pIemCpu->cbWritten += (uint32_t)cbMem;
4496}
4497
4498
4499/**
4500 * Checks if the given segment can be written to, raising the appropriate
4501 * exception if not.
4502 *
4503 * @returns VBox strict status code.
4504 *
4505 * @param pIemCpu The IEM per CPU data.
4506 * @param pHid Pointer to the hidden register.
4507 * @param iSegReg The register number.
4508 */
4509static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4510{
4511 if (!pHid->Attr.n.u1Present)
4512 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4513
4514 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4515 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4516 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4517 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
4518
4519 /** @todo DPL/RPL/CPL? */
4520
4521 return VINF_SUCCESS;
4522}
4523
4524
4525/**
4526 * Checks if the given segment can be read from, raising the appropriate
4527 * exception if not.
4528 *
4529 * @returns VBox strict status code.
4530 *
4531 * @param pIemCpu The IEM per CPU data.
4532 * @param pHid Pointer to the hidden register.
4533 * @param iSegReg The register number.
4534 */
4535static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4536{
4537 if (!pHid->Attr.n.u1Present)
4538 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4539
4540 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
4541 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4542 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
4543
4544 /** @todo DPL/RPL/CPL? */
4545
4546 return VINF_SUCCESS;
4547}
4548
4549
4550/**
4551 * Applies the segment limit, base and attributes.
4552 *
4553 * This may raise a \#GP or \#SS.
4554 *
4555 * @returns VBox strict status code.
4556 *
4557 * @param pIemCpu The IEM per CPU data.
4558 * @param fAccess The kind of access which is being performed.
4559 * @param iSegReg The index of the segment register to apply.
4560 * This is UINT8_MAX if none (for IDT, GDT, LDT,
4561 * TSS, ++).
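 * @param cbMem The size of the memory access.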
4562 * @param pGCPtrMem Pointer to the guest memory address to apply
4563 * segmentation to. Input and output parameter.
4564 */
4565static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
4566 size_t cbMem, PRTGCPTR pGCPtrMem)
4567{
4568 if (iSegReg == UINT8_MAX)
4569 return VINF_SUCCESS;
4570
4571 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
4572 switch (pIemCpu->enmCpuMode)
4573 {
4574 case IEMMODE_16BIT:
4575 case IEMMODE_32BIT:
4576 {
4577 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
4578 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
4579
4580 Assert(pSel->Attr.n.u1Present);
4581 Assert(pSel->Attr.n.u1DescType);
4582 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
4583 {
4584 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4585 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4586 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4587
4588 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4589 {
4590 /** @todo CPL check. */
4591 }
4592
4593 /*
4594 * There are two kinds of data selectors, normal and expand down.
4595 */
4596 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
4597 {
4598 if ( GCPtrFirst32 > pSel->u32Limit
4599 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4600 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4601
4602 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4603 }
4604 else
4605 {
4606 /** @todo implement expand down segments. */
4607 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n"));
4608 }
4609 }
4610 else
4611 {
4612
4613 /*
4614 * A code selector can usually be used to read through; writing is
4615 * only permitted in real and V8086 mode.
4616 */
4617 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4618 || ( (fAccess & IEM_ACCESS_TYPE_READ)
4619 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
4620 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
4621 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4622
4623 if ( GCPtrFirst32 > pSel->u32Limit
4624 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4625 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4626
4627 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4628 {
4629 /** @todo CPL check. */
4630 }
4631
4632 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4633 }
4634 return VINF_SUCCESS;
4635 }
4636
4637 case IEMMODE_64BIT:
4638 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
4639 *pGCPtrMem += pSel->u64Base;
4640 return VINF_SUCCESS;
4641
4642 default:
4643 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
4644 }
4645}
4646
4647
4648/**
4649 * Translates a virtual address to a physical address and checks if we
4650 * can access the page as specified.
4651 *
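 * @returns VBox strict status code.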
4652 * @param pIemCpu The IEM per CPU data.
4653 * @param GCPtrMem The virtual address.
4654 * @param fAccess The intended access.
4655 * @param pGCPhysMem Where to return the physical address.
4656 */
4657static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
4658 PRTGCPHYS pGCPhysMem)
4659{
4660 /** @todo Need a different PGM interface here. We're currently using
4661 * generic / REM interfaces. this won't cut it for R0 & RC. */
4662 RTGCPHYS GCPhys;
4663 uint64_t fFlags;
4664 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
4665 if (RT_FAILURE(rc))
4666 {
4667 /** @todo Check unassigned memory in unpaged mode. */
4668 /** @todo Reserved bits in page tables. Requires new PGM interface. */
4669 *pGCPhysMem = NIL_RTGCPHYS;
4670 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
4671 }
4672
4673 /* If the page is writable and does not have the no-exec bit set, all
4674 access is allowed. Otherwise we'll have to check more carefully... */
4675 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
4676 {
4677 /* Write to read only memory? */
4678 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4679 && !(fFlags & X86_PTE_RW)
4680 && ( pIemCpu->uCpl != 0
4681 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
4682 {
4683 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
4684 *pGCPhysMem = NIL_RTGCPHYS;
4685 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
4686 }
4687
4688 /* Kernel memory accessed by userland? */
4689 if ( !(fFlags & X86_PTE_US)
4690 && pIemCpu->uCpl == 3
4691 && !(fAccess & IEM_ACCESS_WHAT_SYS))
4692 {
4693 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
4694 *pGCPhysMem = NIL_RTGCPHYS;
4695 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
4696 }
4697
4698 /* Executing non-executable memory? */
4699 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
4700 && (fFlags & X86_PTE_PAE_NX)
4701 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
4702 {
4703 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
4704 *pGCPhysMem = NIL_RTGCPHYS;
4705 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
4706 VERR_ACCESS_DENIED);
4707 }
4708 }
4709
4710 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
4711 *pGCPhysMem = GCPhys;
4712 return VINF_SUCCESS;
4713}
4714
4715
4716
4717/**
4718 * Maps a physical page.
4719 *
4720 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
4721 * @param pIemCpu The IEM per CPU data.
4722 * @param GCPhysMem The physical address.
4723 * @param fAccess The intended access.
4724 * @param ppvMem Where to return the mapping address.
4725 * @param pLock The PGM lock.
4726 */
4727static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
4728{
4729#ifdef IEM_VERIFICATION_MODE
4730 /* Force the alternative path so we can ignore writes. */
4731 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
4732 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4733#endif
4734#ifdef IEM_LOG_MEMORY_ACCESS
4735 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4736#endif
4737
4738 /** @todo This API may require some improving later. A private deal with PGM
4739 * regarding locking and unlocking needs to be struck. A couple of TLBs
4740 * living in PGM, but with publicly accessible inlined access methods
4741 * could perhaps be an even better solution. */
4742 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
4743 GCPhysMem,
4744 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
4745 pIemCpu->fBypassHandlers,
4746 ppvMem,
4747 pLock);
4748 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
4749 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
4750 return rc;
4751}
4752
4753
4754/**
4755 * Unmap a page previously mapped by iemMemPageMap.
4756 *
4757 * @param pIemCpu The IEM per CPU data.
4758 * @param GCPhysMem The physical address.
4759 * @param fAccess The intended access.
4760 * @param pvMem What iemMemPageMap returned.
4761 * @param pLock The PGM lock.
4762 */
4763DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
4764{
4765 NOREF(pIemCpu);
4766 NOREF(GCPhysMem);
4767 NOREF(fAccess);
4768 NOREF(pvMem);
4769 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
4770}
4771
4772
4773/**
4774 * Looks up a memory mapping entry.
4775 *
4776 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
4777 * @param pIemCpu The IEM per CPU data.
4778 * @param pvMem The memory address.
4779 * @param fAccess The access flags to match.
4780 */
4781DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4782{
4783 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
4784 if ( pIemCpu->aMemMappings[0].pv == pvMem
4785 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4786 return 0;
4787 if ( pIemCpu->aMemMappings[1].pv == pvMem
4788 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4789 return 1;
4790 if ( pIemCpu->aMemMappings[2].pv == pvMem
4791 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4792 return 2;
4793 return VERR_NOT_FOUND;
4794}
4795
4796
4797/**
4798 * Finds a free memmap entry when using iNextMapping doesn't work.
4799 *
4800 * @returns Memory mapping index, 1024 on failure.
4801 * @param pIemCpu The IEM per CPU data.
4802 */
4803static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
4804{
4805 /*
4806 * The easy case.
4807 */
4808 if (pIemCpu->cActiveMappings == 0)
4809 {
4810 pIemCpu->iNextMapping = 1;
4811 return 0;
4812 }
4813
4814 /* There should be enough mappings for all instructions. */
4815 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
4816
4817 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
4818 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
4819 return i;
4820
4821 AssertFailedReturn(1024);
4822}
4823
4824
4825/**
4826 * Commits a bounce buffer that needs writing back and unmaps it.
4827 *
4828 * @returns Strict VBox status code.
4829 * @param pIemCpu The IEM per CPU data.
4830 * @param iMemMap The index of the buffer to commit.
4831 */
4832static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
4833{
4834 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
4835 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
4836
4837 /*
4838 * Do the writing.
4839 */
4840 int rc;
4841 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
4842 && !IEM_VERIFICATION_ENABLED(pIemCpu))
4843 {
4844 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4845 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4846 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4847 if (!pIemCpu->fBypassHandlers)
4848 {
4849 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4850 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4851 pbBuf,
4852 cbFirst);
4853 if (cbSecond && rc == VINF_SUCCESS)
4854 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4855 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4856 pbBuf + cbFirst,
4857 cbSecond);
4858 }
4859 else
4860 {
4861 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4862 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4863 pbBuf,
4864 cbFirst);
4865 if (cbSecond && rc == VINF_SUCCESS)
4866 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4867 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4868 pbBuf + cbFirst,
4869 cbSecond);
4870 }
4871 if (rc != VINF_SUCCESS)
4872 {
4873 /** @todo status code handling */
4874 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
4875 pIemCpu->fBypassHandlers ? "PGMPhysWrite" : "PGMPhysSimpleWriteGCPhys",
4876 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
4877 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
4878 }
4879 }
4880 else
4881 rc = VINF_SUCCESS;
4882
4883#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
4884 /*
4885 * Record the write(s).
4886 */
4887 if (!pIemCpu->fNoRem)
4888 {
4889 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4890 if (pEvtRec)
4891 {
4892 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4893 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
4894 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4895 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
4896 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
4897 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4898 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4899 }
4900 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
4901 {
4902 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4903 if (pEvtRec)
4904 {
4905 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4906 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
4907 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4908 memcpy(pEvtRec->u.RamWrite.ab,
4909 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
4910 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
4911 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4912 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4913 }
4914 }
4915 }
4916#endif
4917#ifdef IEM_LOG_MEMORY_ACCESS
4918 if (rc == VINF_SUCCESS)
4919 {
4920 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4921 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
4922 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
4923 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4924 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
4925 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
4926 }
4927#endif
4928
4929 /*
4930 * Free the mapping entry.
4931 */
4932 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4933 Assert(pIemCpu->cActiveMappings != 0);
4934 pIemCpu->cActiveMappings--;
4935 return rc;
4936}
4937
4938
4939/**
4940 * iemMemMap worker that deals with a request crossing pages.
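 *
 * The access is split at the page boundary: both pages are translated and
 * checked, the guest bytes are staged in one of the per-CPU bounce buffers,
 * and that buffer is what the caller gets mapped. For write accesses a later
 * commit writes the buffer back to the two physical pages.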
4941 */
4942static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
4943 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
4944{
4945 /*
4946 * Do the address translations.
4947 */
4948 RTGCPHYS GCPhysFirst;
4949 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
4950 if (rcStrict != VINF_SUCCESS)
4951 return rcStrict;
4952
4953 RTGCPHYS GCPhysSecond;
4954 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
4955 if (rcStrict != VINF_SUCCESS)
4956 return rcStrict;
4957 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
4958
4959 /*
4960 * Read in the current memory content if it's a read, execute or partial
4961 * write access.
4962 */
4963 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4964 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
4965 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
4966
4967 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
4968 {
4969 int rc;
4970 if (!pIemCpu->fBypassHandlers)
4971 {
4972 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
4973 if (rc != VINF_SUCCESS)
4974 {
4975 /** @todo status code handling */
4976 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
4977 return rc;
4978 }
4979 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
4980 if (rc != VINF_SUCCESS)
4981 {
4982 /** @todo status code handling */
4983 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
4984 return rc;
4985 }
4986 }
4987 else
4988 {
4989 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
4990 if (rc != VINF_SUCCESS)
4991 {
4992 /** @todo status code handling */
4993 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
4994 return rc;
4995 }
4996 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
4997 if (rc != VINF_SUCCESS)
4998 {
4999 /** @todo status code handling */
5000 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5001 return rc;
5002 }
5003 }
5004
5005#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
5006 if ( !pIemCpu->fNoRem
5007 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5008 {
5009 /*
5010 * Record the reads.
5011 */
5012 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5013 if (pEvtRec)
5014 {
5015 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5016 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5017 pEvtRec->u.RamRead.cb = cbFirstPage;
5018 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5019 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5020 }
5021 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5022 if (pEvtRec)
5023 {
5024 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5025 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
5026 pEvtRec->u.RamRead.cb = cbSecondPage;
5027 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5028 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5029 }
5030 }
5031#endif
5032 }
5033#ifdef VBOX_STRICT
5034 else
5035 memset(pbBuf, 0xcc, cbMem);
5036#endif
5037#ifdef VBOX_STRICT
5038 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5039 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5040#endif
5041
5042 /*
5043 * Commit the bounce buffer entry.
5044 */
5045 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5046 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5047 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5048 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5049 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
5050 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5051 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5052 pIemCpu->cActiveMappings++;
5053
5054 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5055 *ppvMem = pbBuf;
5056 return VINF_SUCCESS;
5057}
5058
5059
5060/**
5061 * iemMemMap worker that deals with iemMemPageMap failures.
5062 */
5063static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5064 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5065{
5066 /*
5067 * Filter out conditions we can handle and the ones which shouldn't happen.
5068 */
5069 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5070 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5071 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5072 {
5073 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
5074 return rcMap;
5075 }
5076 pIemCpu->cPotentialExits++;
5077
5078 /*
5079 * Read in the current memory content if it's a read, execute or partial
5080 * write access.
5081 */
5082 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5083 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5084 {
5085 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5086 memset(pbBuf, 0xff, cbMem);
5087 else
5088 {
5089 int rc;
5090 if (!pIemCpu->fBypassHandlers)
5091 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
5092 else
5093 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
5094 if (rc != VINF_SUCCESS)
5095 {
5096 /** @todo status code handling */
5097 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
5098                 pIemCpu->fBypassHandlers ? "PGMPhysSimpleReadGCPhys" : "PGMPhysRead", GCPhysFirst, rc));
5099 return rc;
5100 }
5101 }
5102
5103#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
5104 if ( !pIemCpu->fNoRem
5105 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5106 {
5107 /*
5108 * Record the read.
5109 */
5110 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5111 if (pEvtRec)
5112 {
5113 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5114 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5115 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
5116 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5117 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5118 }
5119 }
5120#endif
5121 }
5122#ifdef VBOX_STRICT
5123 else
5124 memset(pbBuf, 0xcc, cbMem);
5125#endif
5126#ifdef VBOX_STRICT
5127 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5128 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5129#endif
5130
5131 /*
5132 * Commit the bounce buffer entry.
5133 */
5134 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5135 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5136 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5137 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
5138 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5139 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5140 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5141 pIemCpu->cActiveMappings++;
5142
5143 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5144 *ppvMem = pbBuf;
5145 return VINF_SUCCESS;
5146}
5147
5148
5149
5150/**
5151 * Maps the specified guest memory for the given kind of access.
5152 *
5153 * This may be using bounce buffering of the memory if it's crossing a page
5154 * boundary or if there is an access handler installed for any of it. Because
5155 * of lock prefix guarantees, we're in for some extra clutter when this
5156 * happens.
5157 *
5158 * This may raise a \#GP, \#SS, \#PF or \#AC.
5159 *
5160 * @returns VBox strict status code.
5161 *
5162 * @param pIemCpu The IEM per CPU data.
5163 * @param ppvMem Where to return the pointer to the mapped
5164 * memory.
5165 * @param cbMem The number of bytes to map. This is usually 1,
5166 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5167 * string operations it can be up to a page.
5168 * @param iSegReg The index of the segment register to use for
5169 * this access. The base and limits are checked.
5170 * Use UINT8_MAX to indicate that no segmentation
5171 * is required (for IDT, GDT and LDT accesses).
5172 * @param GCPtrMem The address of the guest memory.
5173 * @param   fAccess             How the memory is being accessed.  The
5174 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5175 * how to map the memory, while the
5176 * IEM_ACCESS_WHAT_XXX bit is used when raising
5177 * exceptions.
5178 */
5179static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
5180{
5181 /*
5182 * Check the input and figure out which mapping entry to use.
5183 */
5184 Assert(cbMem <= 32 || cbMem == 512 || cbMem == 108 || cbMem == 94);
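    /* (94 and 108 bytes are the 16-bit and 32-bit FPU state images used by
       fnsave/frstor, while 512 bytes is the fxsave/fxrstor area.) */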
5185    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5186
5187 unsigned iMemMap = pIemCpu->iNextMapping;
5188 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
5189 {
5190 iMemMap = iemMemMapFindFree(pIemCpu);
5191 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
5192 }
5193
5194 /*
5195 * Map the memory, checking that we can actually access it. If something
5196 * slightly complicated happens, fall back on bounce buffering.
5197 */
5198 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5199 if (rcStrict != VINF_SUCCESS)
5200 return rcStrict;
5201
5202 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
5203 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5204
5205 RTGCPHYS GCPhysFirst;
5206 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
5207 if (rcStrict != VINF_SUCCESS)
5208 return rcStrict;
5209
5210 void *pvMem;
5211 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5212 if (rcStrict != VINF_SUCCESS)
5213 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5214
5215 /*
5216 * Fill in the mapping table entry.
5217 */
5218 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
5219 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
5220 pIemCpu->iNextMapping = iMemMap + 1;
5221 pIemCpu->cActiveMappings++;
5222
5223 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5224 *ppvMem = pvMem;
5225 return VINF_SUCCESS;
5226}
5227
5228
5229/**
5230 * Commits the guest memory if bounce buffered and unmaps it.
5231 *
5232 * @returns Strict VBox status code.
5233 * @param pIemCpu The IEM per CPU data.
5234 * @param pvMem The mapping.
5235 * @param fAccess The kind of access.
5236 */
5237static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5238{
5239 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
5240 AssertReturn(iMemMap >= 0, iMemMap);
5241
5242 /* If it's bounce buffered, we may need to write back the buffer. */
5243 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
5244 {
5245 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
5246 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
5247 }
5248 /* Otherwise unlock it. */
5249 else
5250 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5251
5252 /* Free the entry. */
5253 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5254 Assert(pIemCpu->cActiveMappings != 0);
5255 pIemCpu->cActiveMappings--;
5256 return VINF_SUCCESS;
5257}
5258
5259
5260/**
5261 * Fetches a data byte.
5262 *
5263 * @returns Strict VBox status code.
5264 * @param pIemCpu The IEM per CPU data.
5265 * @param pu8Dst Where to return the byte.
5266 * @param iSegReg The index of the segment register to use for
5267 * this access. The base and limits are checked.
5268 * @param GCPtrMem The address of the guest memory.
5269 */
5270static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5271{
5272 /* The lazy approach for now... */
5273 uint8_t const *pu8Src;
5274 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5275 if (rc == VINF_SUCCESS)
5276 {
5277 *pu8Dst = *pu8Src;
5278 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5279 }
5280 return rc;
5281}
5282
5283
5284/**
5285 * Fetches a data word.
5286 *
5287 * @returns Strict VBox status code.
5288 * @param pIemCpu The IEM per CPU data.
5289 * @param pu16Dst Where to return the word.
5290 * @param iSegReg The index of the segment register to use for
5291 * this access. The base and limits are checked.
5292 * @param GCPtrMem The address of the guest memory.
5293 */
5294static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5295{
5296 /* The lazy approach for now... */
5297 uint16_t const *pu16Src;
5298 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5299 if (rc == VINF_SUCCESS)
5300 {
5301 *pu16Dst = *pu16Src;
5302 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
5303 }
5304 return rc;
5305}
5306
5307
5308/**
5309 * Fetches a data dword.
5310 *
5311 * @returns Strict VBox status code.
5312 * @param pIemCpu The IEM per CPU data.
5313 * @param pu32Dst Where to return the dword.
5314 * @param iSegReg The index of the segment register to use for
5315 * this access. The base and limits are checked.
5316 * @param GCPtrMem The address of the guest memory.
5317 */
5318static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5319{
5320 /* The lazy approach for now... */
5321 uint32_t const *pu32Src;
5322 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5323 if (rc == VINF_SUCCESS)
5324 {
5325 *pu32Dst = *pu32Src;
5326 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
5327 }
5328 return rc;
5329}
5330
5331
5332#ifdef SOME_UNUSED_FUNCTION
5333/**
5334 * Fetches a data dword and sign extends it to a qword.
5335 *
5336 * @returns Strict VBox status code.
5337 * @param pIemCpu The IEM per CPU data.
5338 * @param pu64Dst Where to return the sign extended value.
5339 * @param iSegReg The index of the segment register to use for
5340 * this access. The base and limits are checked.
5341 * @param GCPtrMem The address of the guest memory.
5342 */
5343static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5344{
5345 /* The lazy approach for now... */
5346 int32_t const *pi32Src;
5347 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5348 if (rc == VINF_SUCCESS)
5349 {
5350 *pu64Dst = *pi32Src;
5351 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
5352 }
5353#ifdef __GNUC__ /* warning: GCC may be a royal pain */
5354 else
5355 *pu64Dst = 0;
5356#endif
5357 return rc;
5358}
5359#endif
5360
5361
5362/**
5363 * Fetches a data qword.
5364 *
5365 * @returns Strict VBox status code.
5366 * @param pIemCpu The IEM per CPU data.
5367 * @param pu64Dst Where to return the qword.
5368 * @param iSegReg The index of the segment register to use for
5369 * this access. The base and limits are checked.
5370 * @param GCPtrMem The address of the guest memory.
5371 */
5372static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5373{
5374 /* The lazy approach for now... */
5375 uint64_t const *pu64Src;
5376 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5377 if (rc == VINF_SUCCESS)
5378 {
5379 *pu64Dst = *pu64Src;
5380 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5381 }
5382 return rc;
5383}
5384
5385
5386/**
5387 * Fetches a data tword.
5388 *
5389 * @returns Strict VBox status code.
5390 * @param pIemCpu The IEM per CPU data.
5391 * @param pr80Dst Where to return the tword.
5392 * @param iSegReg The index of the segment register to use for
5393 * this access. The base and limits are checked.
5394 * @param GCPtrMem The address of the guest memory.
5395 */
5396static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5397{
5398 /* The lazy approach for now... */
5399 PCRTFLOAT80U pr80Src;
5400 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5401 if (rc == VINF_SUCCESS)
5402 {
5403 *pr80Dst = *pr80Src;
5404 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
5405 }
5406 return rc;
5407}
5408
5409
5410/**
5411 * Fetches a descriptor register (lgdt, lidt).
5412 *
5413 * @returns Strict VBox status code.
5414 * @param pIemCpu The IEM per CPU data.
5415 * @param pcbLimit Where to return the limit.
5416 * @param   pGCPtrBase          Where to return the base.
5417 * @param iSegReg The index of the segment register to use for
5418 * this access. The base and limits are checked.
5419 * @param GCPtrMem The address of the guest memory.
5420 * @param enmOpSize The effective operand size.
5421 */
5422static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
5423 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5424{
5425 uint8_t const *pu8Src;
5426 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5427 (void **)&pu8Src,
5428 enmOpSize == IEMMODE_64BIT
5429 ? 2 + 8
5430 : enmOpSize == IEMMODE_32BIT
5431 ? 2 + 4
5432 : 2 + 3,
5433 iSegReg,
5434 GCPtrMem,
5435 IEM_ACCESS_DATA_R);
5436 if (rcStrict == VINF_SUCCESS)
5437 {
5438 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
5439 switch (enmOpSize)
5440 {
5441 case IEMMODE_16BIT:
5442 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
5443 break;
5444 case IEMMODE_32BIT:
5445 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
5446 break;
5447 case IEMMODE_64BIT:
5448 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
5449 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
5450 break;
5451
5452 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5453 }
5454 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5455 }
5456 return rcStrict;
5457}
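/* For reference: the pseudo-descriptor read above consists of a 16-bit limit
   followed by the base address: 3 base bytes for a 16-bit operand size (the
   fourth byte is forced to zero), 4 bytes for 32-bit and 8 bytes for 64-bit. */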
5458
5459
5460
5461/**
5462 * Stores a data byte.
5463 *
5464 * @returns Strict VBox status code.
5465 * @param pIemCpu The IEM per CPU data.
5466 * @param iSegReg The index of the segment register to use for
5467 * this access. The base and limits are checked.
5468 * @param GCPtrMem The address of the guest memory.
5469 * @param u8Value The value to store.
5470 */
5471static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
5472{
5473 /* The lazy approach for now... */
5474 uint8_t *pu8Dst;
5475 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5476 if (rc == VINF_SUCCESS)
5477 {
5478 *pu8Dst = u8Value;
5479 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
5480 }
5481 return rc;
5482}
5483
5484
5485/**
5486 * Stores a data word.
5487 *
5488 * @returns Strict VBox status code.
5489 * @param pIemCpu The IEM per CPU data.
5490 * @param iSegReg The index of the segment register to use for
5491 * this access. The base and limits are checked.
5492 * @param GCPtrMem The address of the guest memory.
5493 * @param u16Value The value to store.
5494 */
5495static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
5496{
5497 /* The lazy approach for now... */
5498 uint16_t *pu16Dst;
5499 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5500 if (rc == VINF_SUCCESS)
5501 {
5502 *pu16Dst = u16Value;
5503 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
5504 }
5505 return rc;
5506}
5507
5508
5509/**
5510 * Stores a data dword.
5511 *
5512 * @returns Strict VBox status code.
5513 * @param pIemCpu The IEM per CPU data.
5514 * @param iSegReg The index of the segment register to use for
5515 * this access. The base and limits are checked.
5516 * @param GCPtrMem The address of the guest memory.
5517 * @param u32Value The value to store.
5518 */
5519static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
5520{
5521 /* The lazy approach for now... */
5522 uint32_t *pu32Dst;
5523 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5524 if (rc == VINF_SUCCESS)
5525 {
5526 *pu32Dst = u32Value;
5527 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
5528 }
5529 return rc;
5530}
5531
5532
5533/**
5534 * Stores a data qword.
5535 *
5536 * @returns Strict VBox status code.
5537 * @param pIemCpu The IEM per CPU data.
5538 * @param iSegReg The index of the segment register to use for
5539 * this access. The base and limits are checked.
5540 * @param GCPtrMem The address of the guest memory.
5541 * @param u64Value The value to store.
5542 */
5543static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
5544{
5545 /* The lazy approach for now... */
5546 uint64_t *pu64Dst;
5547 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5548 if (rc == VINF_SUCCESS)
5549 {
5550 *pu64Dst = u64Value;
5551 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
5552 }
5553 return rc;
5554}
5555
5556
5557/**
5558 * Stores a descriptor register (sgdt, sidt).
5559 *
5560 * @returns Strict VBox status code.
5561 * @param pIemCpu The IEM per CPU data.
5562 * @param cbLimit The limit.
5563 * @param   GCPtrBase           The base address.
5564 * @param iSegReg The index of the segment register to use for
5565 * this access. The base and limits are checked.
5566 * @param GCPtrMem The address of the guest memory.
5567 * @param enmOpSize The effective operand size.
5568 */
5569static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
5570 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5571{
5572 uint8_t *pu8Src;
5573 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5574 (void **)&pu8Src,
5575 enmOpSize == IEMMODE_64BIT
5576 ? 2 + 8
5577 : enmOpSize == IEMMODE_32BIT
5578 ? 2 + 4
5579 : 2 + 3,
5580 iSegReg,
5581 GCPtrMem,
5582 IEM_ACCESS_DATA_W);
5583 if (rcStrict == VINF_SUCCESS)
5584 {
5585 pu8Src[0] = RT_BYTE1(cbLimit);
5586 pu8Src[1] = RT_BYTE2(cbLimit);
5587 pu8Src[2] = RT_BYTE1(GCPtrBase);
5588 pu8Src[3] = RT_BYTE2(GCPtrBase);
5589 pu8Src[4] = RT_BYTE3(GCPtrBase);
5590 if (enmOpSize == IEMMODE_16BIT)
5591 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
5592 else
5593 {
5594 pu8Src[5] = RT_BYTE4(GCPtrBase);
5595 if (enmOpSize == IEMMODE_64BIT)
5596 {
5597 pu8Src[6] = RT_BYTE5(GCPtrBase);
5598 pu8Src[7] = RT_BYTE6(GCPtrBase);
5599 pu8Src[8] = RT_BYTE7(GCPtrBase);
5600 pu8Src[9] = RT_BYTE8(GCPtrBase);
5601 }
5602 }
5603 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
5604 }
5605 return rcStrict;
5606}
5607
5608
5609/**
5610 * Pushes a word onto the stack.
5611 *
5612 * @returns Strict VBox status code.
5613 * @param pIemCpu The IEM per CPU data.
5614 * @param u16Value The value to push.
5615 */
5616static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
5617{
5618    /* Decrement the stack pointer. */
5619 uint64_t uNewRsp;
5620 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5621 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
5622
5623 /* Write the word the lazy way. */
5624 uint16_t *pu16Dst;
5625 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5626 if (rc == VINF_SUCCESS)
5627 {
5628 *pu16Dst = u16Value;
5629 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5630 }
5631
5632    /* Commit the new RSP value unless an access handler made trouble. */
5633 if (rc == VINF_SUCCESS)
5634 pCtx->rsp = uNewRsp;
5635
5636 return rc;
5637}
5638
5639
5640/**
5641 * Pushes a dword onto the stack.
5642 *
5643 * @returns Strict VBox status code.
5644 * @param pIemCpu The IEM per CPU data.
5645 * @param u32Value The value to push.
5646 */
5647static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
5648{
5649    /* Decrement the stack pointer. */
5650 uint64_t uNewRsp;
5651 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5652 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
5653
5654    /* Write the dword the lazy way. */
5655 uint32_t *pu32Dst;
5656 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5657 if (rc == VINF_SUCCESS)
5658 {
5659 *pu32Dst = u32Value;
5660 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5661 }
5662
5663    /* Commit the new RSP value unless an access handler made trouble. */
5664 if (rc == VINF_SUCCESS)
5665 pCtx->rsp = uNewRsp;
5666
5667 return rc;
5668}
5669
5670
5671/**
5672 * Pushes a qword onto the stack.
5673 *
5674 * @returns Strict VBox status code.
5675 * @param pIemCpu The IEM per CPU data.
5676 * @param u64Value The value to push.
5677 */
5678static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
5679{
5680    /* Decrement the stack pointer. */
5681 uint64_t uNewRsp;
5682 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5683 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
5684
5685    /* Write the qword the lazy way. */
5686 uint64_t *pu64Dst;
5687 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5688 if (rc == VINF_SUCCESS)
5689 {
5690 *pu64Dst = u64Value;
5691 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5692 }
5693
5694    /* Commit the new RSP value unless an access handler made trouble. */
5695 if (rc == VINF_SUCCESS)
5696 pCtx->rsp = uNewRsp;
5697
5698 return rc;
5699}
5700
5701
5702/**
5703 * Pops a word from the stack.
5704 *
5705 * @returns Strict VBox status code.
5706 * @param pIemCpu The IEM per CPU data.
5707 * @param pu16Value Where to store the popped value.
5708 */
5709static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
5710{
5711 /* Increment the stack pointer. */
5712 uint64_t uNewRsp;
5713 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5714 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
5715
5716    /* Read the word the lazy way. */
5717 uint16_t const *pu16Src;
5718 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5719 if (rc == VINF_SUCCESS)
5720 {
5721 *pu16Value = *pu16Src;
5722 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5723
5724 /* Commit the new RSP value. */
5725 if (rc == VINF_SUCCESS)
5726 pCtx->rsp = uNewRsp;
5727 }
5728
5729 return rc;
5730}
5731
5732
5733/**
5734 * Pops a dword from the stack.
5735 *
5736 * @returns Strict VBox status code.
5737 * @param pIemCpu The IEM per CPU data.
5738 * @param pu32Value Where to store the popped value.
5739 */
5740static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
5741{
5742 /* Increment the stack pointer. */
5743 uint64_t uNewRsp;
5744 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5745 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
5746
5747    /* Read the dword the lazy way. */
5748 uint32_t const *pu32Src;
5749 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5750 if (rc == VINF_SUCCESS)
5751 {
5752 *pu32Value = *pu32Src;
5753 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5754
5755 /* Commit the new RSP value. */
5756 if (rc == VINF_SUCCESS)
5757 pCtx->rsp = uNewRsp;
5758 }
5759
5760 return rc;
5761}
5762
5763
5764/**
5765 * Pops a qword from the stack.
5766 *
5767 * @returns Strict VBox status code.
5768 * @param pIemCpu The IEM per CPU data.
5769 * @param pu64Value Where to store the popped value.
5770 */
5771static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
5772{
5773 /* Increment the stack pointer. */
5774 uint64_t uNewRsp;
5775 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5776 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
5777
5778    /* Read the qword the lazy way. */
5779 uint64_t const *pu64Src;
5780 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5781 if (rc == VINF_SUCCESS)
5782 {
5783 *pu64Value = *pu64Src;
5784 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5785
5786 /* Commit the new RSP value. */
5787 if (rc == VINF_SUCCESS)
5788 pCtx->rsp = uNewRsp;
5789 }
5790
5791 return rc;
5792}
5793
5794
5795/**
5796 * Pushes a word onto the stack, using a temporary stack pointer.
5797 *
5798 * @returns Strict VBox status code.
5799 * @param pIemCpu The IEM per CPU data.
5800 * @param u16Value The value to push.
5801 * @param pTmpRsp Pointer to the temporary stack pointer.
5802 */
5803static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
5804{
5805    /* Decrement the stack pointer. */
5806 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5807 RTUINT64U NewRsp = *pTmpRsp;
5808 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
5809
5810 /* Write the word the lazy way. */
5811 uint16_t *pu16Dst;
5812 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5813 if (rc == VINF_SUCCESS)
5814 {
5815 *pu16Dst = u16Value;
5816 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5817 }
5818
5819    /* Commit the new RSP value unless an access handler made trouble. */
5820 if (rc == VINF_SUCCESS)
5821 *pTmpRsp = NewRsp;
5822
5823 return rc;
5824}
5825
5826
5827/**
5828 * Pushes a dword onto the stack, using a temporary stack pointer.
5829 *
5830 * @returns Strict VBox status code.
5831 * @param pIemCpu The IEM per CPU data.
5832 * @param u32Value The value to push.
5833 * @param pTmpRsp Pointer to the temporary stack pointer.
5834 */
5835static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
5836{
5837    /* Decrement the stack pointer. */
5838 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5839 RTUINT64U NewRsp = *pTmpRsp;
5840 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
5841
5842    /* Write the dword the lazy way. */
5843 uint32_t *pu32Dst;
5844 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5845 if (rc == VINF_SUCCESS)
5846 {
5847 *pu32Dst = u32Value;
5848 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5849 }
5850
5851    /* Commit the new RSP value unless an access handler made trouble. */
5852 if (rc == VINF_SUCCESS)
5853 *pTmpRsp = NewRsp;
5854
5855 return rc;
5856}
5857
5858
5859/**
5860 * Pushes a qword onto the stack, using a temporary stack pointer.
5861 *
5862 * @returns Strict VBox status code.
5863 * @param pIemCpu The IEM per CPU data.
5864 * @param u64Value The value to push.
5865 * @param pTmpRsp Pointer to the temporary stack pointer.
5866 */
5867static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
5868{
5869    /* Decrement the stack pointer. */
5870 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5871 RTUINT64U NewRsp = *pTmpRsp;
5872 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
5873
5874    /* Write the qword the lazy way. */
5875 uint64_t *pu64Dst;
5876 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5877 if (rc == VINF_SUCCESS)
5878 {
5879 *pu64Dst = u64Value;
5880 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5881 }
5882
5883    /* Commit the new RSP value unless an access handler made trouble. */
5884 if (rc == VINF_SUCCESS)
5885 *pTmpRsp = NewRsp;
5886
5887 return rc;
5888}
5889
5890
5891/**
5892 * Pops a word from the stack, using a temporary stack pointer.
5893 *
5894 * @returns Strict VBox status code.
5895 * @param pIemCpu The IEM per CPU data.
5896 * @param pu16Value Where to store the popped value.
5897 * @param pTmpRsp Pointer to the temporary stack pointer.
5898 */
5899static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
5900{
5901 /* Increment the stack pointer. */
5902 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5903 RTUINT64U NewRsp = *pTmpRsp;
5904 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
5905
5906    /* Read the word the lazy way. */
5907 uint16_t const *pu16Src;
5908 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5909 if (rc == VINF_SUCCESS)
5910 {
5911 *pu16Value = *pu16Src;
5912 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5913
5914 /* Commit the new RSP value. */
5915 if (rc == VINF_SUCCESS)
5916 *pTmpRsp = NewRsp;
5917 }
5918
5919 return rc;
5920}
5921
5922
5923/**
5924 * Pops a dword from the stack, using a temporary stack pointer.
5925 *
5926 * @returns Strict VBox status code.
5927 * @param pIemCpu The IEM per CPU data.
5928 * @param pu32Value Where to store the popped value.
5929 * @param pTmpRsp Pointer to the temporary stack pointer.
5930 */
5931static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
5932{
5933 /* Increment the stack pointer. */
5934 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5935 RTUINT64U NewRsp = *pTmpRsp;
5936 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
5937
5938    /* Read the dword the lazy way. */
5939 uint32_t const *pu32Src;
5940 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5941 if (rc == VINF_SUCCESS)
5942 {
5943 *pu32Value = *pu32Src;
5944 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5945
5946 /* Commit the new RSP value. */
5947 if (rc == VINF_SUCCESS)
5948 *pTmpRsp = NewRsp;
5949 }
5950
5951 return rc;
5952}
5953
5954
5955/**
5956 * Pops a qword from the stack, using a temporary stack pointer.
5957 *
5958 * @returns Strict VBox status code.
5959 * @param pIemCpu The IEM per CPU data.
5960 * @param pu64Value Where to store the popped value.
5961 * @param pTmpRsp Pointer to the temporary stack pointer.
5962 */
5963static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
5964{
5965 /* Increment the stack pointer. */
5966 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5967 RTUINT64U NewRsp = *pTmpRsp;
5968 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
5969
5970    /* Read the qword the lazy way. */
5971 uint64_t const *pu64Src;
5972 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5973 if (rcStrict == VINF_SUCCESS)
5974 {
5975 *pu64Value = *pu64Src;
5976 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5977
5978 /* Commit the new RSP value. */
5979 if (rcStrict == VINF_SUCCESS)
5980 *pTmpRsp = NewRsp;
5981 }
5982
5983 return rcStrict;
5984}
5985
5986
5987/**
5988 * Begin a special stack push (used by interrupts, exceptions and such).
5989 *
5990 * This will raise \#SS or \#PF if appropriate.
5991 *
5992 * @returns Strict VBox status code.
5993 * @param pIemCpu The IEM per CPU data.
5994 * @param cbMem The number of bytes to push onto the stack.
5995 * @param ppvMem Where to return the pointer to the stack memory.
5996 * As with the other memory functions this could be
5997 * direct access or bounce buffered access, so
5998 * don't commit register until the commit call
5999 * succeeds.
6000 * @param puNewRsp Where to return the new RSP value. This must be
6001 * passed unchanged to
6002 * iemMemStackPushCommitSpecial().
6003 */
6004static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
6005{
6006 Assert(cbMem < UINT8_MAX);
6007 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6008 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
6009 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6010}
6011
6012
6013/**
6014 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
6015 *
6016 * This will update the rSP.
6017 *
6018 * @returns Strict VBox status code.
6019 * @param pIemCpu The IEM per CPU data.
6020 * @param pvMem The pointer returned by
6021 * iemMemStackPushBeginSpecial().
6022 * @param uNewRsp The new RSP value returned by
6023 * iemMemStackPushBeginSpecial().
6024 */
6025static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
6026{
6027 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
6028 if (rcStrict == VINF_SUCCESS)
6029 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6030 return rcStrict;
6031}
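/*
 * A minimal usage sketch for the begin/commit pair (hypothetical caller,
 * loosely modelled on real-mode exception delivery; the variable names are
 * made up for illustration):
 *
 *     uint16_t    *pu16Frame;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pu16Frame[2] = uFlags;
 *     pu16Frame[1] = uSelCs;
 *     pu16Frame[0] = uOffIp;
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 *
 * Note that pCtx->rsp is only updated once the commit succeeds.
 */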
6032
6033
6034/**
6035 * Begin a special stack pop (used by iret, retf and such).
6036 *
6037 * This will raise \#SS or \#PF if appropriate.
6038 *
6039 * @returns Strict VBox status code.
6040 * @param pIemCpu The IEM per CPU data.
6041 * @param   cbMem               The number of bytes to pop off the stack.
6042 * @param ppvMem Where to return the pointer to the stack memory.
6043 * @param puNewRsp Where to return the new RSP value. This must be
6044 * passed unchanged to
6045 * iemMemStackPopCommitSpecial() or applied
6046 * manually if iemMemStackPopDoneSpecial() is used.
6047 */
6048static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6049{
6050 Assert(cbMem < UINT8_MAX);
6051 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6052 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
6053 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6054}
6055
6056
6057/**
6058 * Continue a special stack pop (used by iret and retf).
6059 *
6060 * This will raise \#SS or \#PF if appropriate.
6061 *
6062 * @returns Strict VBox status code.
6063 * @param pIemCpu The IEM per CPU data.
6064 * @param   cbMem               The number of bytes to pop off the stack.
6065 * @param ppvMem Where to return the pointer to the stack memory.
6066 * @param puNewRsp Where to return the new RSP value. This must be
6067 * passed unchanged to
6068 * iemMemStackPopCommitSpecial() or applied
6069 * manually if iemMemStackPopDoneSpecial() is used.
6070 */
6071static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6072{
6073 Assert(cbMem < UINT8_MAX);
6074 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6075 RTUINT64U NewRsp;
6076 NewRsp.u = *puNewRsp;
6077 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
6078 *puNewRsp = NewRsp.u;
6079 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6080}
6081
6082
6083/**
6084 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
6085 *
6086 * This will update the rSP.
6087 *
6088 * @returns Strict VBox status code.
6089 * @param pIemCpu The IEM per CPU data.
6090 * @param pvMem The pointer returned by
6091 * iemMemStackPopBeginSpecial().
6092 * @param uNewRsp The new RSP value returned by
6093 * iemMemStackPopBeginSpecial().
6094 */
6095static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
6096{
6097 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6098 if (rcStrict == VINF_SUCCESS)
6099 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6100 return rcStrict;
6101}
6102
6103
6104/**
6105 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
6106 * iemMemStackPopContinueSpecial).
6107 *
6108 * The caller will manually commit the rSP.
6109 *
6110 * @returns Strict VBox status code.
6111 * @param pIemCpu The IEM per CPU data.
6112 * @param pvMem The pointer returned by
6113 * iemMemStackPopBeginSpecial() or
6114 * iemMemStackPopContinueSpecial().
6115 */
6116static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
6117{
6118 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6119}
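/*
 * Usage note (illustrative, not taken from a specific caller): iret/retf style
 * code is expected to call iemMemStackPopBeginSpecial for the first part of
 * the frame, iemMemStackPopContinueSpecial for any further parts, and finally
 * either iemMemStackPopCommitSpecial or iemMemStackPopDoneSpecial; with the
 * latter the caller stores the returned uNewRsp into pCtx->rsp itself once
 * all checks have passed.
 */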
6120
6121
6122/**
6123 * Fetches a system table dword.
6124 *
6125 * @returns Strict VBox status code.
6126 * @param pIemCpu The IEM per CPU data.
6127 * @param pu32Dst Where to return the dword.
6128 * @param iSegReg The index of the segment register to use for
6129 * this access. The base and limits are checked.
6130 * @param GCPtrMem The address of the guest memory.
6131 */
6132static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6133{
6134 /* The lazy approach for now... */
6135 uint32_t const *pu32Src;
6136 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6137 if (rc == VINF_SUCCESS)
6138 {
6139 *pu32Dst = *pu32Src;
6140 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
6141 }
6142 return rc;
6143}
6144
6145
6146/**
6147 * Fetches a system table qword.
6148 *
6149 * @returns Strict VBox status code.
6150 * @param pIemCpu The IEM per CPU data.
6151 * @param pu64Dst Where to return the qword.
6152 * @param iSegReg The index of the segment register to use for
6153 * this access. The base and limits are checked.
6154 * @param GCPtrMem The address of the guest memory.
6155 */
6156static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6157{
6158 /* The lazy approach for now... */
6159 uint64_t const *pu64Src;
6160 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6161 if (rc == VINF_SUCCESS)
6162 {
6163 *pu64Dst = *pu64Src;
6164 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
6165 }
6166 return rc;
6167}
6168
6169
6170/**
6171 * Fetches a descriptor table entry.
6172 *
6173 * @returns Strict VBox status code.
6174 * @param pIemCpu The IEM per CPU.
6175 * @param pDesc Where to return the descriptor table entry.
6176 * @param uSel The selector which table entry to fetch.
6177 */
6178static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
6179{
6180 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6181
6182 /** @todo did the 286 require all 8 bytes to be accessible? */
6183 /*
6184 * Get the selector table base and check bounds.
6185 */
6186 RTGCPTR GCPtrBase;
6187 if (uSel & X86_SEL_LDT)
6188 {
6189 if ( !pCtx->ldtr.Attr.n.u1Present
6190 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
6191 {
6192 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
6193 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
6194 /** @todo is this the right exception? */
6195 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6196 }
6197
6198 Assert(pCtx->ldtr.Attr.n.u1Present);
6199 GCPtrBase = pCtx->ldtr.u64Base;
6200 }
6201 else
6202 {
6203 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
6204 {
6205 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
6206 /** @todo is this the right exception? */
6207 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6208 }
6209 GCPtrBase = pCtx->gdtr.pGdt;
6210 }
6211
6212 /*
6213 * Read the legacy descriptor and maybe the long mode extensions if
6214 * required.
6215 */
6216 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
6217 if (rcStrict == VINF_SUCCESS)
6218 {
6219 if ( !IEM_IS_LONG_MODE(pIemCpu)
6220 || pDesc->Legacy.Gen.u1DescType)
6221 pDesc->Long.au64[1] = 0;
6222 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
6223 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
6224 else
6225 {
6226 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
6227 /** @todo is this the right exception? */
6228 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6229 }
6230 }
6231 return rcStrict;
6232}
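/* Note: in long mode, system descriptors (LDT, TSS and gates) are 16 bytes
   wide, which is why the second qword is fetched above whenever the entry is
   not a code or data descriptor. */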
6233
6234
6235/**
6236 * Fakes a long mode stack selector for SS = 0.
6237 *
6238 * @param pDescSs Where to return the fake stack descriptor.
6239 * @param uDpl The DPL we want.
6240 */
6241static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
6242{
6243 pDescSs->Long.au64[0] = 0;
6244 pDescSs->Long.au64[1] = 0;
6245 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
6246 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
6247 pDescSs->Long.Gen.u2Dpl = uDpl;
6248 pDescSs->Long.Gen.u1Present = 1;
6249 pDescSs->Long.Gen.u1Long = 1;
6250}
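/* Note: 64-bit mode permits a NULL SS selector outside ring 3, so the fake
   descriptor above supplies the present, writable, long-mode attributes that
   the privilege transition code expects to find in a stack descriptor. */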
6251
6252
6253/**
6254 * Marks the selector descriptor as accessed (only non-system descriptors).
6255 *
6256 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
6257 * will therefore skip the limit checks.
6258 *
6259 * @returns Strict VBox status code.
6260 * @param pIemCpu The IEM per CPU.
6261 * @param uSel The selector.
6262 */
6263static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
6264{
6265 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6266
6267 /*
6268 * Get the selector table base and calculate the entry address.
6269 */
6270 RTGCPTR GCPtr = uSel & X86_SEL_LDT
6271 ? pCtx->ldtr.u64Base
6272 : pCtx->gdtr.pGdt;
6273 GCPtr += uSel & X86_SEL_MASK;
6274
6275 /*
6276     * ASMAtomicBitSet will assert if the address is misaligned, so do some
6277     * ugly stuff to avoid that.  This also makes sure the access is atomic and
6278     * more or less removes any question about 8-bit vs 32-bit accesses.
6279 */
6280 VBOXSTRICTRC rcStrict;
6281 uint32_t volatile *pu32;
6282 if ((GCPtr & 3) == 0)
6283 {
6284        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
6285 GCPtr += 2 + 2;
6286 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6287 if (rcStrict != VINF_SUCCESS)
6288 return rcStrict;
6289        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
6290 }
6291 else
6292 {
6293 /* The misaligned GDT/LDT case, map the whole thing. */
6294 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6295 if (rcStrict != VINF_SUCCESS)
6296 return rcStrict;
6297 switch ((uintptr_t)pu32 & 3)
6298 {
6299 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
6300 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
6301 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
6302 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
6303 }
6304 }
6305
6306 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
6307}
6308
6309/** @} */
6310
6311
6312/*
6313 * Include the C/C++ implementation of instructions.
6314 */
6315#include "IEMAllCImpl.cpp.h"
6316
6317
6318
6319/** @name "Microcode" macros.
6320 *
6321 * The idea is that we should be able to use the same code both to interpret
6322 * instructions and, eventually, to feed a recompiler.  Thus this obfuscation.
6323 *
6324 * @{
6325 */
6326#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
6327#define IEM_MC_END() }
6328#define IEM_MC_PAUSE() do {} while (0)
6329#define IEM_MC_CONTINUE() do {} while (0)
6330
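/*
 * Purely illustrative sketch of how an opcode handler composes these macros
 * (not an actual handler from the opcode tables; a 16-bit register-to-register
 * move is assumed):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * When interpreted the block expands to plain C; a recompiler could translate
 * the same macro sequence into generated code instead.
 */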
6331/** Internal macro. */
6332#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
6333 do \
6334 { \
6335 VBOXSTRICTRC rcStrict2 = a_Expr; \
6336 if (rcStrict2 != VINF_SUCCESS) \
6337 return rcStrict2; \
6338 } while (0)
6339
6340#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
6341#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
6342#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
6343#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
6344#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
6345#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
6346#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
6347
6348#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
6349#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
6350 do { \
6351 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
6352 return iemRaiseDeviceNotAvailable(pIemCpu); \
6353 } while (0)
6354#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
6355 do { \
6356 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
6357 return iemRaiseMathFault(pIemCpu); \
6358 } while (0)
6359#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
6360 do { \
6361 if (pIemCpu->uCpl != 0) \
6362 return iemRaiseGeneralProtectionFault0(pIemCpu); \
6363 } while (0)
6364
6365
6366#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
6367#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
6368#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
6369#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
6370#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
6371#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
6372#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
6373 uint32_t a_Name; \
6374 uint32_t *a_pName = &a_Name
6375#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
6376 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
6377
6378#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
6379#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
6380
6381#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6382#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6383#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6384#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6385#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6386#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6387#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6388#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6389#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6390#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6391#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6392#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6393#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6394#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6395#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
6396#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
6397#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
6398#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6399#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6400#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6401#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6402#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6403#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
6404#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6405#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6406#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6407#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6408#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6409#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6410/** @note Not for IOPL or IF testing or modification. */
6411#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6412#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6413#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
6414#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
6415
6416#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
6417#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
6418#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
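/** @note In 64-bit mode a 32-bit general register write zero-extends into the
 *        whole 64-bit register, which is why the U32 store above goes through
 *        a uint64_t pointer and why the U32 arithmetic macros below explicitly
 *        zero pu32Reg[1]. */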
6419#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
6420#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
6421#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
6422#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
6423#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
6424#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
6425#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
6426#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
6427 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
6428
6429#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
6430#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
6431/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
6432 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
6433#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
6434#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
6435/** @note Not for IOPL or IF testing or modification. */
6436#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6437
6438#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
6439#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
6440#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
6441 do { \
6442 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6443 *pu32Reg += (a_u32Value); \
6444        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6445 } while (0)
6446#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
6447
6448#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
6449#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
6450#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
6451 do { \
6452 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6453 *pu32Reg -= (a_u32Value); \
6454        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6455 } while (0)
6456#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
6457
6458#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
6459#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
6460#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
6461#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
6462#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
6463#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
6464#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
6465
6466#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
6467#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
6468#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6469#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
6470
6471#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
6472#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
6473#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
6474
6475#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
6476#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6477
6478#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
6479#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
6480#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
6481
6482#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
6483#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
6484#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
6485
6486#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6487
6488#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6489
6490#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
6491#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
6492#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
6493 do { \
6494 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6495 *pu32Reg &= (a_u32Value); \
6496        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6497 } while (0)
6498#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
6499
6500#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
6501#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
6502#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
6503 do { \
6504 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6505 *pu32Reg |= (a_u32Value); \
6506        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6507 } while (0)
6508#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
6509
6510
6511/** @note Not for IOPL or IF modification. */
6512#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
6513/** @note Not for IOPL or IF modification. */
6514#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
6515/** @note Not for IOPL or IF modification. */
6516#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
6517
6518#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
6519
6520
6521#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
6522 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
6523#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
6524 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
6525#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
6526 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
6527
6528#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6529 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
6530#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6531 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6532#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
6533 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
6534
6535#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6536 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
6537#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6538 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6539#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
6540 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
6541
6542#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6543 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6544
6545#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6546 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6547#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6548 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6549
6550#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
6551 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
6552#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
6553 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
6554#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
6555 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
6556
6557
6558#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6559 do { \
6560 uint8_t u8Tmp; \
6561 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6562 (a_u16Dst) = u8Tmp; \
6563 } while (0)
6564#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6565 do { \
6566 uint8_t u8Tmp; \
6567 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6568 (a_u32Dst) = u8Tmp; \
6569 } while (0)
6570#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6571 do { \
6572 uint8_t u8Tmp; \
6573 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6574 (a_u64Dst) = u8Tmp; \
6575 } while (0)
6576#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6577 do { \
6578 uint16_t u16Tmp; \
6579 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6580 (a_u32Dst) = u16Tmp; \
6581 } while (0)
6582#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6583 do { \
6584 uint16_t u16Tmp; \
6585 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6586 (a_u64Dst) = u16Tmp; \
6587 } while (0)
6588#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6589 do { \
6590 uint32_t u32Tmp; \
6591 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6592 (a_u64Dst) = u32Tmp; \
6593 } while (0)
6594
6595#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6596 do { \
6597 uint8_t u8Tmp; \
6598 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6599 (a_u16Dst) = (int8_t)u8Tmp; \
6600 } while (0)
6601#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6602 do { \
6603 uint8_t u8Tmp; \
6604 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6605 (a_u32Dst) = (int8_t)u8Tmp; \
6606 } while (0)
6607#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6608 do { \
6609 uint8_t u8Tmp; \
6610 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6611 (a_u64Dst) = (int8_t)u8Tmp; \
6612 } while (0)
6613#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6614 do { \
6615 uint16_t u16Tmp; \
6616 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6617 (a_u32Dst) = (int16_t)u16Tmp; \
6618 } while (0)
6619#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6620 do { \
6621 uint16_t u16Tmp; \
6622 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6623 (a_u64Dst) = (int16_t)u16Tmp; \
6624 } while (0)
6625#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6626 do { \
6627 uint32_t u32Tmp; \
6628 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6629 (a_u64Dst) = (int32_t)u32Tmp; \
6630 } while (0)
6631
6632#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
6633 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
6634#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
6635 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
6636#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
6637 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
6638#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
6639 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
6640
6641#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
6642 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
6643#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
6644 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
6645#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
6646 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
6647#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
6648 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
6649
6650#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
6651#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
6652#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
6653#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
6654#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
6655#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
6656#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
6657 do { \
6658 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
6659 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
6660 } while (0)
6661
6662
6663#define IEM_MC_PUSH_U16(a_u16Value) \
6664 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
6665#define IEM_MC_PUSH_U32(a_u32Value) \
6666 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
6667#define IEM_MC_PUSH_U64(a_u64Value) \
6668 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
6669
6670#define IEM_MC_POP_U16(a_pu16Value) \
6671 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
6672#define IEM_MC_POP_U32(a_pu32Value) \
6673 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
6674#define IEM_MC_POP_U64(a_pu64Value) \
6675 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
6676
6677/** Maps guest memory for direct or bounce buffered access.
6678 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6679 * @remarks May return.
6680 */
6681#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
6682 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6683
6684/** Maps guest memory for direct or bounce buffered access.
6685 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6686 * @remarks May return.
6687 */
6688#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
6689 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6690
6691/** Commits the memory and unmaps the guest memory.
6692 * @remarks May return.
6693 */
6694#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
6695 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
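
/* Illustrative sketch of how the mapping and commit macros above are typically
 * used for a read-modify-write memory operand (fragment only, not interpreter
 * code; u16Src, pEFlags and GCPtrEffDst are assumed locals of the surrounding
 * IEM_MC block and iemAImpl_add_u16 stands in for whatever worker applies):
 *
 *      uint16_t *pu16Dst;
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */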
6696
6697/** Commits the memory and unmaps the guest memory unless the FPU status word
6698 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
6699 * that would cause the FPU store instruction (FST and friends) not to store.
6700 *
6701 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
6702 * store, while \#P will not.
6703 *
6704 * @remarks May in theory return - for now.
6705 */
6706#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
6707 do { \
6708 if ( !(a_u16FSW & X86_FSW_ES) \
6709 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
6710 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
6711 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
6712 } while (0)
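
/* Worked example of the test above, using hypothetical values: assume the FPU
 * worker left X86_FSW_ES and X86_FSW_UE set in @a a_u16FSW.  With #U unmasked,
 * i.e. FCW = X86_FCW_MASK_ALL & ~X86_FCW_UM, the expression
 *      (FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) & ~(FCW & X86_FCW_MASK_ALL)
 * evaluates to X86_FSW_UE (non-zero), so the commit is skipped.  Setting
 * X86_FCW_UM in FCW makes it evaluate to zero and the store is committed. */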
6713
6714/** Calculate efficient address from R/M. */
6715#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
6716 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
6717
6718#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
6719#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
6720#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
6721#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
6722#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
6723
6724/**
6725 * Defers the rest of the instruction emulation to a C implementation routine
6726 * and returns, only taking the standard parameters.
6727 *
6728 * @param a_pfnCImpl The pointer to the C routine.
6729 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6730 */
6731#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6732
6733/**
6734 * Defers the rest of instruction emulation to a C implementation routine and
6735 * returns, taking one argument in addition to the standard ones.
6736 *
6737 * @param a_pfnCImpl The pointer to the C routine.
6738 * @param a0 The argument.
6739 */
6740#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6741
6742/**
6743 * Defers the rest of the instruction emulation to a C implementation routine
6744 * and returns, taking two arguments in addition to the standard ones.
6745 *
6746 * @param a_pfnCImpl The pointer to the C routine.
6747 * @param a0 The first extra argument.
6748 * @param a1 The second extra argument.
6749 */
6750#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6751
6752/**
6753 * Defers the rest of the instruction emulation to a C implementation routine
6754 * and returns, taking three arguments in addition to the standard ones.
6755 *
6756 * @param a_pfnCImpl The pointer to the C routine.
6757 * @param a0 The first extra argument.
6758 * @param a1 The second extra argument.
6759 * @param a2 The third extra argument.
6760 */
6761#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
6762
6763/**
6764 * Defers the rest of the instruction emulation to a C implementation routine
6765 * and returns, taking five arguments in addition to the standard ones.
6766 *
6767 * @param a_pfnCImpl The pointer to the C routine.
6768 * @param a0 The first extra argument.
6769 * @param a1 The second extra argument.
6770 * @param a2 The third extra argument.
6771 * @param a3 The fourth extra argument.
6772 * @param a4 The fifth extra argument.
6773 */
6774#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
6775
6776/**
6777 * Defers the entire instruction emulation to a C implementation routine and
6778 * returns, only taking the standard parameters.
6779 *
6780 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
6781 *
6782 * @param a_pfnCImpl The pointer to the C routine.
6783 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6784 */
6785#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6786
6787/**
6788 * Defers the entire instruction emulation to a C implementation routine and
6789 * returns, taking one argument in addition to the standard ones.
6790 *
6791 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
6792 *
6793 * @param a_pfnCImpl The pointer to the C routine.
6794 * @param a0 The argument.
6795 */
6796#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6797
6798/**
6799 * Defers the entire instruction emulation to a C implementation routine and
6800 * returns, taking two arguments in addition to the standard ones.
6801 *
6802 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
6803 *
6804 * @param a_pfnCImpl The pointer to the C routine.
6805 * @param a0 The first extra argument.
6806 * @param a1 The second extra argument.
6807 */
6808#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6809
6810/**
6811 * Defers the entire instruction emulation to a C implementation routine and
6812 * returns, taking three arguments in addition to the standard ones.
6813 *
6814 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
6815 *
6816 * @param a_pfnCImpl The pointer to the C routine.
6817 * @param a0 The first extra argument.
6818 * @param a1 The second extra argument.
6819 * @param a2 The third extra argument.
6820 */
6821#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
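
/* Illustrative sketch of the defer pattern (names are made up for the example):
 * a decoder body can simply end with
 *
 *      return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ExampleWorker, u8Imm);
 *
 * where iemCImpl_ExampleWorker would be defined via IEM_CIMPL_DEF_1 and, besides
 * the explicit argument, receives pIemCpu and the number of opcode bytes decoded
 * so far (pIemCpu->offOpcode) as the standard parameters. */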
6822
6823/**
6824 * Calls a FPU assembly implementation taking one visible argument.
6825 *
6826 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6827 * @param a0 The first extra argument.
6828 */
6829#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
6830 do { \
6831 iemFpuPrepareUsage(pIemCpu); \
6832 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
6833 } while (0)
6834
6835/**
6836 * Calls a FPU assembly implementation taking two visible arguments.
6837 *
6838 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6839 * @param a0 The first extra argument.
6840 * @param a1 The second extra argument.
6841 */
6842#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
6843 do { \
6844 iemFpuPrepareUsage(pIemCpu); \
6845 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
6846 } while (0)
6847
6848/**
6849 * Calls a FPU assembly implementation taking three visible arguments.
6850 *
6851 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6852 * @param a0 The first extra argument.
6853 * @param a1 The second extra argument.
6854 * @param a2 The third extra argument.
6855 */
6856#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
6857 do { \
6858 iemFpuPrepareUsage(pIemCpu); \
6859 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
6860 } while (0)
6861
6862#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
6863 do { \
6864 (a_FpuData).FSW = (a_FSW); \
6865 (a_FpuData).r80Result = *(a_pr80Value); \
6866 } while (0)
6867
6868/** Pushes FPU result onto the stack. */
6869#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
6870 iemFpuPushResult(pIemCpu, &a_FpuData)
6871/** Pushes FPU result onto the stack and sets the FPUDP. */
6872#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
6873 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
6874
6875/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
6876#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
6877 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
6878
6879/** Stores FPU result in a stack register. */
6880#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
6881 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
6882/** Stores FPU result in a stack register and pops the stack. */
6883#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
6884 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
6885/** Stores FPU result in a stack register and sets the FPUDP. */
6886#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6887 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6888/** Stores FPU result in a stack register, sets the FPUDP, and pops the
6889 * stack. */
6890#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6891 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6892
6893/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
6894#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
6895 iemFpuUpdateOpcodeAndIp(pIemCpu)
6896/** Free a stack register (for FFREE and FFREEP). */
6897#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
6898 iemFpuStackFree(pIemCpu, a_iStReg)
6899/** Increment the FPU stack pointer. */
6900#define IEM_MC_FPU_STACK_INC_TOP() \
6901 iemFpuStackIncTop(pIemCpu)
6902/** Decrement the FPU stack pointer. */
6903#define IEM_MC_FPU_STACK_DEC_TOP() \
6904 iemFpuStackDecTop(pIemCpu)
6905
6906/** Updates the FSW, FOP, FPUIP, and FPUCS. */
6907#define IEM_MC_UPDATE_FSW(a_u16FSW) \
6908 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6909/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
6910#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
6911 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6912/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
6913#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6914 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6915/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
6916#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
6917 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
6918/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
6919 * stack. */
6920#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6921 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6922/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
6923#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
6924 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
6925
6926/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
6927#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
6928 iemFpuStackUnderflow(pIemCpu, a_iStDst)
6929/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6930 * stack. */
6931#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
6932 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
6933/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6934 * FPUDS. */
6935#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6936 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6937/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6938 * FPUDS. Pops stack. */
6939#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6940 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6941/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6942 * stack twice. */
6943#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
6944 iemFpuStackUnderflowThenPopPop(pIemCpu)
6945/** Raises a FPU stack underflow exception for an instruction pushing a result
6946 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
6947#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
6948 iemFpuStackPushUnderflow(pIemCpu)
6949/** Raises a FPU stack underflow exception for an instruction pushing a result
6950 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
6951#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
6952 iemFpuStackPushUnderflowTwo(pIemCpu)
6953
6954/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6955 * FPUIP, FPUCS and FOP. */
6956#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
6957 iemFpuStackPushOverflow(pIemCpu)
6958/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6959 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
6960#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
6961 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
6962/** Indicates that we (might) have modified the FPU state. */
6963#define IEM_MC_USED_FPU() \
6964 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
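
/* Rough sketch of how the FPU result and underflow statements above typically
 * compose for an ST(0)-only operation (fragment only; IEM_MC_LOCAL, IEM_MC_ARG
 * and IEM_MC_ARG_LOCAL_REF are assumed from earlier in this file, and
 * iemAImpl_example_r80 is a made-up worker name):
 *
 *      IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);
 *      IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) {
 *          IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_example_r80, pFpuRes, pr80Value);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      } IEM_MC_ENDIF();
 *      IEM_MC_USED_FPU();
 */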
6965
6966/** @note Not for IOPL or IF testing. */
6967#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
6968/** @note Not for IOPL or IF testing. */
6969#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
6970/** @note Not for IOPL or IF testing. */
6971#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
6972/** @note Not for IOPL or IF testing. */
6973#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
6974/** @note Not for IOPL or IF testing. */
6975#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
6976 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6977 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6978/** @note Not for IOPL or IF testing. */
6979#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
6980 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6981 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6982/** @note Not for IOPL or IF testing. */
6983#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
6984 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
6985 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6986 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6987/** @note Not for IOPL or IF testing. */
6988#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
6989 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
6990 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6991 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6992#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
6993#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
6994#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
6995/** @note Not for IOPL or IF testing. */
6996#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
6997 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
6998 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6999/** @note Not for IOPL or IF testing. */
7000#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7001 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7002 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7003/** @note Not for IOPL or IF testing. */
7004#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7005 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7006 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7007/** @note Not for IOPL or IF testing. */
7008#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7009 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7010 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7011/** @note Not for IOPL or IF testing. */
7012#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7013 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7014 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7015/** @note Not for IOPL or IF testing. */
7016#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7017 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7018 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7019#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
7020#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
7021#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
7022 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
7023#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
7024 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
7025#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
7026 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
7027#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
7028 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
7029#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
7030 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
7031#define IEM_MC_IF_FCW_IM() \
7032 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
7033
7034#define IEM_MC_ELSE() } else {
7035#define IEM_MC_ENDIF() } do {} while (0)
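
/* Rough sketch of the IF/ELSE/ENDIF statements above in a conditional-branch
 * style decoder body (fragment only; IEM_MC_BEGIN/IEM_MC_END, IEM_MC_REL_JMP_S8
 * and IEM_MC_ADVANCE_RIP are assumed from earlier in this file):
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ENDIF();
 *      IEM_MC_END();
 */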
7036
7037/** @} */
7038
7039
7040/** @name Opcode Debug Helpers.
7041 * @{
7042 */
7043#ifdef DEBUG
7044# define IEMOP_MNEMONIC(a_szMnemonic) \
7045 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7046 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
7047# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
7048 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7049 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
7050#else
7051# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
7052# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
7053#endif
7054
7055/** @} */
7056
7057
7058/** @name Opcode Helpers.
7059 * @{
7060 */
7061
7062/** The instruction raises an \#UD in real and V8086 mode. */
7063#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
7064 do \
7065 { \
7066 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
7067 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7068 } while (0)
7069
7070/** The instruction allows no lock prefixing (in this encoding); raises \#UD if a
7071 * lock prefix is present.
7072 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
7073#define IEMOP_HLP_NO_LOCK_PREFIX() \
7074 do \
7075 { \
7076 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7077 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7078 } while (0)
7079
7080/** The instruction is not available in 64-bit mode; raises \#UD if we're in
7081 * 64-bit mode. */
7082#define IEMOP_HLP_NO_64BIT() \
7083 do \
7084 { \
7085 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7086 return IEMOP_RAISE_INVALID_OPCODE(); \
7087 } while (0)
7088
7089/** The instruction defaults to 64-bit operand size if in 64-bit mode. */
7090#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
7091 do \
7092 { \
7093 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7094 iemRecalEffOpSize64Default(pIemCpu); \
7095 } while (0)
7096
7097/** The instruction has 64-bit operand size if in 64-bit mode. */
7098#define IEMOP_HLP_64BIT_OP_SIZE() \
7099 do \
7100 { \
7101 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7102 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
7103 } while (0)
7104
7105/**
7106 * Done decoding.
7107 */
7108#define IEMOP_HLP_DONE_DECODING() \
7109 do \
7110 { \
7111 /*nothing for now, maybe later... */ \
7112 } while (0)
7113
7114/**
7115 * Done decoding, raise \#UD exception if lock prefix present.
7116 */
7117#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
7118 do \
7119 { \
7120 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7121 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7122 } while (0)
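
/* Rough sketch of a typical decoder prologue using the helpers above (fragment
 * only; the mnemonic is illustrative and IEM_OPCODE_GET_NEXT_U8 is assumed from
 * earlier in this file):
 *
 *      IEMOP_MNEMONIC("mov Ev,Gv");
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      IEMOP_HLP_NO_LOCK_PREFIX();
 *      // ... then dispatch on (bRm & X86_MODRM_MOD_MASK) for the register vs.
 *      //     memory forms, using IEM_MC_CALC_RM_EFF_ADDR for the latter.
 */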
7123
7124
7125/**
7126 * Calculates the effective address of a ModR/M memory operand.
7127 *
7128 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7129 *
7130 * @return Strict VBox status code.
7131 * @param pIemCpu The IEM per CPU data.
7132 * @param bRm The ModRM byte.
7133 * @param pGCPtrEff Where to return the effective address.
7134 */
7135static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
7136{
7137 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
7138 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7139#define SET_SS_DEF() \
7140 do \
7141 { \
7142 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
7143 pIemCpu->iEffSeg = X86_SREG_SS; \
7144 } while (0)
7145
7146/** @todo Check the effective address size crap! */
7147 switch (pIemCpu->enmEffAddrMode)
7148 {
7149 case IEMMODE_16BIT:
7150 {
7151 uint16_t u16EffAddr;
7152
7153 /* Handle the disp16 form with no registers first. */
7154 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
7155 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
7156 else
7157 {
7158                /* Get the displacement. */
7159 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7160 {
7161 case 0: u16EffAddr = 0; break;
7162 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
7163 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
7164 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7165 }
7166
7167 /* Add the base and index registers to the disp. */
7168 switch (bRm & X86_MODRM_RM_MASK)
7169 {
7170 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
7171 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
7172 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
7173 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
7174 case 4: u16EffAddr += pCtx->si; break;
7175 case 5: u16EffAddr += pCtx->di; break;
7176 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
7177 case 7: u16EffAddr += pCtx->bx; break;
7178 }
7179 }
7180
7181 *pGCPtrEff = u16EffAddr;
7182 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
7183 return VINF_SUCCESS;
7184 }
7185
7186 case IEMMODE_32BIT:
7187 {
7188 uint32_t u32EffAddr;
7189
7190 /* Handle the disp32 form with no registers first. */
7191 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7192 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
7193 else
7194 {
7195 /* Get the register (or SIB) value. */
7196 switch ((bRm & X86_MODRM_RM_MASK))
7197 {
7198 case 0: u32EffAddr = pCtx->eax; break;
7199 case 1: u32EffAddr = pCtx->ecx; break;
7200 case 2: u32EffAddr = pCtx->edx; break;
7201 case 3: u32EffAddr = pCtx->ebx; break;
7202 case 4: /* SIB */
7203 {
7204 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7205
7206 /* Get the index and scale it. */
7207 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
7208 {
7209 case 0: u32EffAddr = pCtx->eax; break;
7210 case 1: u32EffAddr = pCtx->ecx; break;
7211 case 2: u32EffAddr = pCtx->edx; break;
7212 case 3: u32EffAddr = pCtx->ebx; break;
7213 case 4: u32EffAddr = 0; /*none */ break;
7214 case 5: u32EffAddr = pCtx->ebp; break;
7215 case 6: u32EffAddr = pCtx->esi; break;
7216 case 7: u32EffAddr = pCtx->edi; break;
7217 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7218 }
7219 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7220
7221 /* add base */
7222 switch (bSib & X86_SIB_BASE_MASK)
7223 {
7224 case 0: u32EffAddr += pCtx->eax; break;
7225 case 1: u32EffAddr += pCtx->ecx; break;
7226 case 2: u32EffAddr += pCtx->edx; break;
7227 case 3: u32EffAddr += pCtx->ebx; break;
7228 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
7229 case 5:
7230 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7231 {
7232 u32EffAddr += pCtx->ebp;
7233 SET_SS_DEF();
7234 }
7235 else
7236 {
7237 uint32_t u32Disp;
7238 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7239 u32EffAddr += u32Disp;
7240 }
7241 break;
7242 case 6: u32EffAddr += pCtx->esi; break;
7243 case 7: u32EffAddr += pCtx->edi; break;
7244 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7245 }
7246 break;
7247 }
7248 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
7249 case 6: u32EffAddr = pCtx->esi; break;
7250 case 7: u32EffAddr = pCtx->edi; break;
7251 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7252 }
7253
7254 /* Get and add the displacement. */
7255 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7256 {
7257 case 0:
7258 break;
7259 case 1:
7260 {
7261 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7262 u32EffAddr += i8Disp;
7263 break;
7264 }
7265 case 2:
7266 {
7267 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7268 u32EffAddr += u32Disp;
7269 break;
7270 }
7271 default:
7272 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7273 }
7274
7275 }
7276 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
7277 *pGCPtrEff = u32EffAddr;
7278 else
7279 {
7280 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
7281 *pGCPtrEff = u32EffAddr & UINT16_MAX;
7282 }
7283 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
7284 return VINF_SUCCESS;
7285 }
7286
7287 case IEMMODE_64BIT:
7288 {
7289 uint64_t u64EffAddr;
7290
7291 /* Handle the rip+disp32 form with no registers first. */
7292 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7293 {
7294 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
7295 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
7296 }
7297 else
7298 {
7299 /* Get the register (or SIB) value. */
7300 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
7301 {
7302 case 0: u64EffAddr = pCtx->rax; break;
7303 case 1: u64EffAddr = pCtx->rcx; break;
7304 case 2: u64EffAddr = pCtx->rdx; break;
7305 case 3: u64EffAddr = pCtx->rbx; break;
7306 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
7307 case 6: u64EffAddr = pCtx->rsi; break;
7308 case 7: u64EffAddr = pCtx->rdi; break;
7309 case 8: u64EffAddr = pCtx->r8; break;
7310 case 9: u64EffAddr = pCtx->r9; break;
7311 case 10: u64EffAddr = pCtx->r10; break;
7312 case 11: u64EffAddr = pCtx->r11; break;
7313 case 13: u64EffAddr = pCtx->r13; break;
7314 case 14: u64EffAddr = pCtx->r14; break;
7315 case 15: u64EffAddr = pCtx->r15; break;
7316 /* SIB */
7317 case 4:
7318 case 12:
7319 {
7320 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7321
7322 /* Get the index and scale it. */
7323                    switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
7324 {
7325 case 0: u64EffAddr = pCtx->rax; break;
7326 case 1: u64EffAddr = pCtx->rcx; break;
7327 case 2: u64EffAddr = pCtx->rdx; break;
7328 case 3: u64EffAddr = pCtx->rbx; break;
7329 case 4: u64EffAddr = 0; /*none */ break;
7330 case 5: u64EffAddr = pCtx->rbp; break;
7331 case 6: u64EffAddr = pCtx->rsi; break;
7332 case 7: u64EffAddr = pCtx->rdi; break;
7333 case 8: u64EffAddr = pCtx->r8; break;
7334 case 9: u64EffAddr = pCtx->r9; break;
7335 case 10: u64EffAddr = pCtx->r10; break;
7336 case 11: u64EffAddr = pCtx->r11; break;
7337 case 12: u64EffAddr = pCtx->r12; break;
7338 case 13: u64EffAddr = pCtx->r13; break;
7339 case 14: u64EffAddr = pCtx->r14; break;
7340 case 15: u64EffAddr = pCtx->r15; break;
7341 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7342 }
7343 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7344
7345 /* add base */
7346 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
7347 {
7348 case 0: u64EffAddr += pCtx->rax; break;
7349 case 1: u64EffAddr += pCtx->rcx; break;
7350 case 2: u64EffAddr += pCtx->rdx; break;
7351 case 3: u64EffAddr += pCtx->rbx; break;
7352 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
7353 case 6: u64EffAddr += pCtx->rsi; break;
7354 case 7: u64EffAddr += pCtx->rdi; break;
7355 case 8: u64EffAddr += pCtx->r8; break;
7356 case 9: u64EffAddr += pCtx->r9; break;
7357 case 10: u64EffAddr += pCtx->r10; break;
7358 case 11: u64EffAddr += pCtx->r11; break;
7359 case 14: u64EffAddr += pCtx->r14; break;
7360 case 15: u64EffAddr += pCtx->r15; break;
7361 /* complicated encodings */
7362 case 5:
7363 case 13:
7364 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7365 {
7366 if (!pIemCpu->uRexB)
7367 {
7368 u64EffAddr += pCtx->rbp;
7369 SET_SS_DEF();
7370 }
7371 else
7372 u64EffAddr += pCtx->r13;
7373 }
7374 else
7375 {
7376 uint32_t u32Disp;
7377 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7378 u64EffAddr += (int32_t)u32Disp;
7379 }
7380 break;
7381 }
7382 break;
7383 }
7384 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7385 }
7386
7387 /* Get and add the displacement. */
7388 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7389 {
7390 case 0:
7391 break;
7392 case 1:
7393 {
7394 int8_t i8Disp;
7395 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7396 u64EffAddr += i8Disp;
7397 break;
7398 }
7399 case 2:
7400 {
7401 uint32_t u32Disp;
7402 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7403 u64EffAddr += (int32_t)u32Disp;
7404 break;
7405 }
7406 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
7407 }
7408
7409 }
7410 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
7411 *pGCPtrEff = u64EffAddr;
7412 else
7413        *pGCPtrEff = u64EffAddr & UINT32_MAX;
7414 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
7415 return VINF_SUCCESS;
7416 }
7417 }
7418
7419 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
7420}
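
/* Worked example for the 16-bit branch above (hypothetical input): bRm = 0x52
 * decodes as mod=1, rm=2, i.e. the BP+SI pair with a disp8 displacement, and
 * SET_SS_DEF() switches the default segment to SS unless a segment prefix is
 * present.  With BP=0x1000, SI=0x0020 and disp8=0x10 the returned effective
 * address is 0x1030. */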
7421
7422/** @} */
7423
7424
7425
7426/*
7427 * Include the instructions
7428 */
7429#include "IEMAllInstructions.cpp.h"
7430
7431
7432
7433
7434#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
7435
7436/**
7437 * Sets up execution verification mode.
7438 */
7439static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
7440{
7441 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7442 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
7443
7444 /*
7445 * Always note down the address of the current instruction.
7446 */
7447 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
7448 pIemCpu->uOldRip = pOrgCtx->rip;
7449
7450 /*
7451 * Enable verification and/or logging.
7452 */
7453 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
7454 if ( pIemCpu->fNoRem
7455 && ( 0
7456#if 0 /* auto enable on first paged protected mode interrupt */
7457 || ( pOrgCtx->eflags.Bits.u1IF
7458 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
7459 && TRPMHasTrap(pVCpu)
7460 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
7461#endif
7462#if 0
7463       || (   pOrgCtx->cs.Sel == 0x10
7464           && (   pOrgCtx->rip == 0x90119e3e
7465               || pOrgCtx->rip == 0x901d9810) )
7466#endif
7467#if 0 /* Auto enable DSL - FPU stuff. */
7468       || (   pOrgCtx->cs.Sel == 0x10
7469 && (// pOrgCtx->rip == 0xc02ec07f
7470 //|| pOrgCtx->rip == 0xc02ec082
7471 //|| pOrgCtx->rip == 0xc02ec0c9
7472 0
7473 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
7474#endif
7475#if 0 /* Auto enable DSL - fstp st0 stuff. */
7476        || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
7477#endif
7478#if 0
7479 || pOrgCtx->rip == 0x9022bb3a
7480#endif
7481#if 0
7482 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
7483#endif
7484#if 0
7485 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
7486 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
7487#endif
7488#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
7489 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
7490 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
7491 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
7492#endif
7493#if 0 /* NT4SP1 - xadd early boot. */
7494 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
7495#endif
7496#if 0 /* NT4SP1 - wrmsr (intel MSR). */
7497 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
7498#endif
7499#if 0 /* NT4SP1 - cmpxchg (AMD). */
7500 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
7501#endif
7502#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
7503 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
7504#endif
7505#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
7506 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
7507
7508#endif
7509#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
7510 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
7511
7512#endif
7513#if 0 /* NT4SP1 - frstor [ecx] */
7514 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
7515#endif
7516 )
7517 )
7518 {
7519 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
7520 RTLogFlags(NULL, "enabled");
7521 pIemCpu->fNoRem = false;
7522 }
7523
7524 /*
7525 * Switch state.
7526 */
7527 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7528 {
7529 static CPUMCTX s_DebugCtx; /* Ugly! */
7530
7531 s_DebugCtx = *pOrgCtx;
7532 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
7533 }
7534
7535 /*
7536 * See if there is an interrupt pending in TRPM and inject it if we can.
7537 */
7538 pIemCpu->uInjectCpl = UINT8_MAX;
7539 if ( pOrgCtx->eflags.Bits.u1IF
7540 && TRPMHasTrap(pVCpu)
7541 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
7542 {
7543 uint8_t u8TrapNo;
7544 TRPMEVENT enmType;
7545 RTGCUINT uErrCode;
7546 RTGCPTR uCr2;
7547 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2); AssertRC(rc2);
7548 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
7549 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7550 TRPMResetTrap(pVCpu);
7551 pIemCpu->uInjectCpl = pIemCpu->uCpl;
7552 }
7553
7554 /*
7555 * Reset the counters.
7556 */
7557 pIemCpu->cIOReads = 0;
7558 pIemCpu->cIOWrites = 0;
7559 pIemCpu->fIgnoreRaxRdx = false;
7560 pIemCpu->fOverlappingMovs = false;
7561 pIemCpu->fUndefinedEFlags = 0;
7562
7563 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7564 {
7565 /*
7566 * Free all verification records.
7567 */
7568 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
7569 pIemCpu->pIemEvtRecHead = NULL;
7570 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
7571 do
7572 {
7573 while (pEvtRec)
7574 {
7575 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
7576 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
7577 pIemCpu->pFreeEvtRec = pEvtRec;
7578 pEvtRec = pNext;
7579 }
7580 pEvtRec = pIemCpu->pOtherEvtRecHead;
7581 pIemCpu->pOtherEvtRecHead = NULL;
7582 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
7583 } while (pEvtRec);
7584 }
7585}
7586
7587
7588/**
7589 * Allocate an event record.
7590 * @returns Pointer to a record.
7591 */
7592static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
7593{
7594 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7595 return NULL;
7596
7597 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
7598 if (pEvtRec)
7599 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
7600 else
7601 {
7602 if (!pIemCpu->ppIemEvtRecNext)
7603 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
7604
7605 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
7606 if (!pEvtRec)
7607 return NULL;
7608 }
7609 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
7610 pEvtRec->pNext = NULL;
7611 return pEvtRec;
7612}
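
/* The allocator above keeps retired records on a simple LIFO free list so that
 * steady-state verification runs without hitting the heap.  Generic sketch of
 * the pattern (illustrative only, reusing the singly linked pNext field):
 *
 *      PIEMVERIFYEVTREC pRec = pIemCpu->pFreeEvtRec;   // pop from the free list
 *      if (pRec)
 *          pIemCpu->pFreeEvtRec = pRec->pNext;
 *      ...
 *      pRec->pNext          = pIemCpu->pFreeEvtRec;    // push back when recycled
 *      pIemCpu->pFreeEvtRec = pRec;
 */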
7613
7614
7615/**
7616 * IOMMMIORead notification.
7617 */
7618VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
7619{
7620 PVMCPU pVCpu = VMMGetCpu(pVM);
7621 if (!pVCpu)
7622 return;
7623 PIEMCPU pIemCpu = &pVCpu->iem.s;
7624 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7625 if (!pEvtRec)
7626 return;
7627 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7628 pEvtRec->u.RamRead.GCPhys = GCPhys;
7629 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
7630 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7631 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7632}
7633
7634
7635/**
7636 * IOMMMIOWrite notification.
7637 */
7638VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
7639{
7640 PVMCPU pVCpu = VMMGetCpu(pVM);
7641 if (!pVCpu)
7642 return;
7643 PIEMCPU pIemCpu = &pVCpu->iem.s;
7644 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7645 if (!pEvtRec)
7646 return;
7647 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7648 pEvtRec->u.RamWrite.GCPhys = GCPhys;
7649 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
7650 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
7651 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
7652 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
7653 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
7654 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7655 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7656}
7657
7658
7659/**
7660 * IOMIOPortRead notification.
7661 */
7662VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
7663{
7664 PVMCPU pVCpu = VMMGetCpu(pVM);
7665 if (!pVCpu)
7666 return;
7667 PIEMCPU pIemCpu = &pVCpu->iem.s;
7668 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7669 if (!pEvtRec)
7670 return;
7671 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7672 pEvtRec->u.IOPortRead.Port = Port;
7673 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7674 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7675 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7676}
7677
7678/**
7679 * IOMIOPortWrite notification.
7680 */
7681VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7682{
7683 PVMCPU pVCpu = VMMGetCpu(pVM);
7684 if (!pVCpu)
7685 return;
7686 PIEMCPU pIemCpu = &pVCpu->iem.s;
7687 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7688 if (!pEvtRec)
7689 return;
7690 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7691 pEvtRec->u.IOPortWrite.Port = Port;
7692 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7693 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7694 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7695 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7696}
7697
7698
7699VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
7700{
7701 AssertFailed();
7702}
7703
7704
7705VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
7706{
7707 AssertFailed();
7708}
7709
7710
7711/**
7712 * Fakes and records an I/O port read.
7713 *
7714 * @returns VINF_SUCCESS.
7715 * @param pIemCpu The IEM per CPU data.
7716 * @param Port The I/O port.
7717 * @param pu32Value Where to store the fake value.
7718 * @param cbValue The size of the access.
7719 */
7720static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
7721{
7722 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7723 if (pEvtRec)
7724 {
7725 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7726 pEvtRec->u.IOPortRead.Port = Port;
7727 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7728 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7729 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7730 }
7731 pIemCpu->cIOReads++;
7732 *pu32Value = 0xcccccccc;
7733 return VINF_SUCCESS;
7734}
7735
7736
7737/**
7738 * Fakes and records an I/O port write.
7739 *
7740 * @returns VINF_SUCCESS.
7741 * @param pIemCpu The IEM per CPU data.
7742 * @param Port The I/O port.
7743 * @param u32Value The value being written.
7744 * @param cbValue The size of the access.
7745 */
7746static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7747{
7748 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7749 if (pEvtRec)
7750 {
7751 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7752 pEvtRec->u.IOPortWrite.Port = Port;
7753 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7754 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7755 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7756 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7757 }
7758 pIemCpu->cIOWrites++;
7759 return VINF_SUCCESS;
7760}
7761
7762
7763/**
7764 * Used to add extra details about a stub case.
7765 * @param pIemCpu The IEM per CPU state.
7766 */
7767static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
7768{
7769 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7770 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7771 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7772 char szRegs[4096];
7773 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
7774 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
7775 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
7776 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
7777 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
7778 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
7779 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
7780 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
7781 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
7782 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
7783 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
7784 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
7785 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
7786 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
7787 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
7788 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
7789 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
7790 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
7791 " efer=%016VR{efer}\n"
7792 " pat=%016VR{pat}\n"
7793 " sf_mask=%016VR{sf_mask}\n"
7794 "krnl_gs_base=%016VR{krnl_gs_base}\n"
7795 " lstar=%016VR{lstar}\n"
7796 " star=%016VR{star} cstar=%016VR{cstar}\n"
7797 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
7798 );
7799
7800 char szInstr1[256];
7801 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
7802 DBGF_DISAS_FLAGS_DEFAULT_MODE,
7803 szInstr1, sizeof(szInstr1), NULL);
7804 char szInstr2[256];
7805 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
7806 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
7807 szInstr2, sizeof(szInstr2), NULL);
7808
7809 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
7810}
7811
7812
7813/**
7814 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
7815 * dump to the assertion info.
7816 *
7817 * @param pEvtRec The record to dump.
7818 */
7819static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
7820{
7821 switch (pEvtRec->enmEvent)
7822 {
7823 case IEMVERIFYEVENT_IOPORT_READ:
7824 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
7825                            pEvtRec->u.IOPortRead.Port,
7826                            pEvtRec->u.IOPortRead.cbValue);
7827 break;
7828 case IEMVERIFYEVENT_IOPORT_WRITE:
7829 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
7830 pEvtRec->u.IOPortWrite.Port,
7831 pEvtRec->u.IOPortWrite.cbValue,
7832 pEvtRec->u.IOPortWrite.u32Value);
7833 break;
7834 case IEMVERIFYEVENT_RAM_READ:
7835 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
7836 pEvtRec->u.RamRead.GCPhys,
7837 pEvtRec->u.RamRead.cb);
7838 break;
7839 case IEMVERIFYEVENT_RAM_WRITE:
7840 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
7841 pEvtRec->u.RamWrite.GCPhys,
7842 pEvtRec->u.RamWrite.cb,
7843 (int)pEvtRec->u.RamWrite.cb,
7844 pEvtRec->u.RamWrite.ab);
7845 break;
7846 default:
7847 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
7848 break;
7849 }
7850}
7851
7852
7853/**
7854 * Raises an assertion on the specified records, showing the given message with
7855 * a record dump attached.
7856 *
7857 * @param pIemCpu The IEM per CPU data.
7858 * @param pEvtRec1 The first record.
7859 * @param pEvtRec2 The second record.
7860 * @param pszMsg The message explaining why we're asserting.
7861 */
7862static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
7863{
7864 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7865 iemVerifyAssertAddRecordDump(pEvtRec1);
7866 iemVerifyAssertAddRecordDump(pEvtRec2);
7867 iemVerifyAssertMsg2(pIemCpu);
7868 RTAssertPanic();
7869}
7870
7871
7872/**
7873 * Raises an assertion on the specified record, showing the given message with
7874 * a record dump attached.
7875 *
7876 * @param pIemCpu The IEM per CPU data.
7877 * @param   pEvtRec         The record to dump.
7878 * @param pszMsg The message explaining why we're asserting.
7879 */
7880static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
7881{
7882 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7883 iemVerifyAssertAddRecordDump(pEvtRec);
7884 iemVerifyAssertMsg2(pIemCpu);
7885 RTAssertPanic();
7886}
7887
7888
7889/**
7890 * Verifies a write record.
7891 *
7892 * @param pIemCpu The IEM per CPU data.
7893 * @param pEvtRec The write record.
7894 */
7895static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
7896{
7897 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
7898 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
7899 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
7900 if ( RT_FAILURE(rc)
7901 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
7902 {
7903 /* fend off ins */
7904 if ( !pIemCpu->cIOReads
7905 || pEvtRec->u.RamWrite.ab[0] != 0xcc
7906 || ( pEvtRec->u.RamWrite.cb != 1
7907 && pEvtRec->u.RamWrite.cb != 2
7908 && pEvtRec->u.RamWrite.cb != 4) )
7909 {
7910 /* fend off ROMs */
7911 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
7912 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
7913 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
7914 {
7915 /* fend off fxsave */
7916 if (pEvtRec->u.RamWrite.cb != 512)
7917 {
7918 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7919                    RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
7920 RTAssertMsg2Add("REM: %.*Rhxs\n"
7921 "IEM: %.*Rhxs\n",
7922 pEvtRec->u.RamWrite.cb, abBuf,
7923 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
7924 iemVerifyAssertAddRecordDump(pEvtRec);
7925 iemVerifyAssertMsg2(pIemCpu);
7926 RTAssertPanic();
7927 }
7928 }
7929 }
7930 }
7931
7932}
7933
7934/**
7935 * Performs the post-execution verification checks.
7936 */
7937static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
7938{
7939 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7940 return;
7941
7942 /*
7943 * Switch back the state.
7944 */
7945 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
7946 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
7947 Assert(pOrgCtx != pDebugCtx);
7948 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
7949
7950 /*
7951 * Execute the instruction in REM.
7952 */
7953 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7954 EMRemLock(pVM);
7955 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
7956 AssertRC(rc);
7957 EMRemUnlock(pVM);
7958
7959 /*
7960 * Compare the register states.
7961 */
7962 unsigned cDiffs = 0;
7963 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
7964 {
7965 //Log(("REM and IEM ends up with different registers!\n"));
7966
7967# define CHECK_FIELD(a_Field) \
7968 do \
7969 { \
7970 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
7971 { \
7972 switch (sizeof(pOrgCtx->a_Field)) \
7973 { \
7974 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7975 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7976 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7977 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7978 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
7979 } \
7980 cDiffs++; \
7981 } \
7982 } while (0)
7983
7984# define CHECK_BIT_FIELD(a_Field) \
7985 do \
7986 { \
7987 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
7988 { \
7989 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
7990 cDiffs++; \
7991 } \
7992 } while (0)
7993
7994# define CHECK_SEL(a_Sel) \
7995 do \
7996 { \
7997 CHECK_FIELD(a_Sel.Sel); \
7998 CHECK_FIELD(a_Sel.Attr.u); \
7999 CHECK_FIELD(a_Sel.u64Base); \
8000 CHECK_FIELD(a_Sel.u32Limit); \
8001 CHECK_FIELD(a_Sel.fFlags); \
8002 } while (0)
8003
8004#if 1 /* The recompiler doesn't update these the intel way. */
8005 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
8006 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
8007 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
8008 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
8009 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
8010 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
8011 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
8012 pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
8013 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
8014 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
8015#endif
8016 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
8017 {
8018 RTAssertMsg2Weak(" the FPU state differs\n");
8019 cDiffs++;
8020 CHECK_FIELD(fpu.FCW);
8021 CHECK_FIELD(fpu.FSW);
8022 CHECK_FIELD(fpu.FTW);
8023 CHECK_FIELD(fpu.FOP);
8024 CHECK_FIELD(fpu.FPUIP);
8025 CHECK_FIELD(fpu.CS);
8026 CHECK_FIELD(fpu.Rsrvd1);
8027 CHECK_FIELD(fpu.FPUDP);
8028 CHECK_FIELD(fpu.DS);
8029 CHECK_FIELD(fpu.Rsrvd2);
8030 CHECK_FIELD(fpu.MXCSR);
8031 CHECK_FIELD(fpu.MXCSR_MASK);
8032 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
8033 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
8034 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
8035 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
8036 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
8037 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
8038 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
8039 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
8040 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
8041 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
8042 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
8043 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
8044 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
8045 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
8046 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
8047 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
8048 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
8049 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
8050 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
8051 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
8052 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
8053 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
8054 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
8055 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
8056 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
8057 CHECK_FIELD(fpu.au32RsrvdRest[i]);
8058 }
8059 CHECK_FIELD(rip);
8060 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
8061 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
8062 {
8063 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
8064 CHECK_BIT_FIELD(rflags.Bits.u1CF);
8065 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
8066 CHECK_BIT_FIELD(rflags.Bits.u1PF);
8067 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
8068 CHECK_BIT_FIELD(rflags.Bits.u1AF);
8069 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
8070 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
8071 CHECK_BIT_FIELD(rflags.Bits.u1SF);
8072 CHECK_BIT_FIELD(rflags.Bits.u1TF);
8073 CHECK_BIT_FIELD(rflags.Bits.u1IF);
8074 CHECK_BIT_FIELD(rflags.Bits.u1DF);
8075 CHECK_BIT_FIELD(rflags.Bits.u1OF);
8076 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
8077 CHECK_BIT_FIELD(rflags.Bits.u1NT);
8078 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
8079 CHECK_BIT_FIELD(rflags.Bits.u1RF);
8080 CHECK_BIT_FIELD(rflags.Bits.u1VM);
8081 CHECK_BIT_FIELD(rflags.Bits.u1AC);
8082 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
8083 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
8084 CHECK_BIT_FIELD(rflags.Bits.u1ID);
8085 }
8086
8087 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
8088 CHECK_FIELD(rax);
8089 CHECK_FIELD(rcx);
8090 if (!pIemCpu->fIgnoreRaxRdx)
8091 CHECK_FIELD(rdx);
8092 CHECK_FIELD(rbx);
8093 CHECK_FIELD(rsp);
8094 CHECK_FIELD(rbp);
8095 CHECK_FIELD(rsi);
8096 CHECK_FIELD(rdi);
8097 CHECK_FIELD(r8);
8098 CHECK_FIELD(r9);
8099 CHECK_FIELD(r10);
8100 CHECK_FIELD(r11);
8101 CHECK_FIELD(r12);
8102 CHECK_FIELD(r13); CHECK_FIELD(r14); CHECK_FIELD(r15);
8103 CHECK_SEL(cs);
8104 CHECK_SEL(ss);
8105 CHECK_SEL(ds);
8106 CHECK_SEL(es);
8107 CHECK_SEL(fs);
8108 CHECK_SEL(gs);
8109 CHECK_FIELD(cr0);
8110 CHECK_FIELD(cr2);
8111 CHECK_FIELD(cr3);
8112 CHECK_FIELD(cr4);
8113 CHECK_FIELD(dr[0]);
8114 CHECK_FIELD(dr[1]);
8115 CHECK_FIELD(dr[2]);
8116 CHECK_FIELD(dr[3]);
8117 CHECK_FIELD(dr[6]);
8118 if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
8119 CHECK_FIELD(dr[7]);
8120 CHECK_FIELD(gdtr.cbGdt);
8121 CHECK_FIELD(gdtr.pGdt);
8122 CHECK_FIELD(idtr.cbIdt);
8123 CHECK_FIELD(idtr.pIdt);
8124 CHECK_SEL(ldtr);
8125 CHECK_SEL(tr);
8126 CHECK_FIELD(SysEnter.cs);
8127 CHECK_FIELD(SysEnter.eip);
8128 CHECK_FIELD(SysEnter.esp);
8129 CHECK_FIELD(msrEFER);
8130 CHECK_FIELD(msrSTAR);
8131 CHECK_FIELD(msrPAT);
8132 CHECK_FIELD(msrLSTAR);
8133 CHECK_FIELD(msrCSTAR);
8134 CHECK_FIELD(msrSFMASK);
8135 CHECK_FIELD(msrKERNELGSBASE);
8136
8137 if (cDiffs != 0)
8138 {
8139 DBGFR3Info(pVM, "cpumguest", "verbose", NULL);
8140 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
8141 iemVerifyAssertMsg2(pIemCpu);
8142 RTAssertPanic();
8143 }
8144# undef CHECK_FIELD
8145# undef CHECK_BIT_FIELD
8146 }
8147
8148 /*
8149 * If the register state compared fine, check the verification event
8150 * records.
8151 */
8152 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
8153 {
8154 /*
8155 * Compare verification event records.
8156 * - I/O port accesses should be a 1:1 match.
8157 */
8158 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
8159 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
8160 while (pIemRec && pOtherRec)
8161 {
8162 /* Since we might miss RAM writes and reads, ignore reads and check
8163 that any extra written memory matches what is actually in RAM. */
8164 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
8165 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
8166 && pIemRec->pNext)
8167 {
8168 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8169 iemVerifyWriteRecord(pIemCpu, pIemRec);
8170 pIemRec = pIemRec->pNext;
8171 }
8172
8173 /* Do the compare. */
8174 if (pIemRec->enmEvent != pOtherRec->enmEvent)
8175 {
8176 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
8177 break;
8178 }
8179 bool fEquals;
8180 switch (pIemRec->enmEvent)
8181 {
8182 case IEMVERIFYEVENT_IOPORT_READ:
8183 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
8184 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
8185 break;
8186 case IEMVERIFYEVENT_IOPORT_WRITE:
8187 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
8188 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
8189 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
8190 break;
8191 case IEMVERIFYEVENT_RAM_READ:
8192 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
8193 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
8194 break;
8195 case IEMVERIFYEVENT_RAM_WRITE:
8196 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
8197 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
8198 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
8199 break;
8200 default:
8201 fEquals = false;
8202 break;
8203 }
8204 if (!fEquals)
8205 {
8206 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
8207 break;
8208 }
8209
8210 /* advance */
8211 pIemRec = pIemRec->pNext;
8212 pOtherRec = pOtherRec->pNext;
8213 }
8214
8215 /* Ignore extra writes and reads. */
8216 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
8217 {
8218 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8219 iemVerifyWriteRecord(pIemCpu, pIemRec);
8220 pIemRec = pIemRec->pNext;
8221 }
8222 if (pIemRec != NULL)
8223 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
8224 else if (pOtherRec != NULL)
8225 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra Other record!");
8226 }
8227 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
8228}
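
/*
 * Illustrative sketch (not built): the verification-mode pair is meant to bracket a
 * single emulated instruction, roughly the way IEMExecOne does it further down.  All
 * names below come from this file; only the wrapper function name is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExecOneVerifiedSketch(PVMCPU pVCpu)
{
    PIEMCPU pIemCpu = &pVCpu->iem.s;
    iemExecVerificationModeSetup(pIemCpu);      /* swap to the debug context and start recording events */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false /*fBypassHandlers*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, pIemCpu, true /*fExecuteInhibit*/);
    iemExecVerificationModeCheck(pIemCpu);      /* replay the instruction in REM, compare registers and event records */
    return rcStrict;
}
#endif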
8229
8230#else /* !IEM_VERIFICATION_MODE || !IN_RING3 */
8231
8232/* stubs */
8233static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8234{
8235 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
8236 return VERR_INTERNAL_ERROR;
8237}
8238
8239static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8240{
8241 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
8242 return VERR_INTERNAL_ERROR;
8243}
8244
8245#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */
8246
8247
8248/**
8249 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
8250 * IEMExecOneWithPrefetchedByPC.
8251 *
8252 * @return Strict VBox status code.
8253 * @param pVCpu The current virtual CPU.
8254 * @param pIemCpu The IEM per CPU data.
8255 * @param fExecuteInhibit If set, execute the instruction following CLI,
8256 * POP SS and MOV SS,GR.
8257 */
8258DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
8259{
8260 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
8261 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8262 if (rcStrict == VINF_SUCCESS)
8263 pIemCpu->cInstructions++;
8264//#ifdef DEBUG
8265// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
8266//#endif
8267
8268 /* Execute the next instruction as well if a cli, pop ss or
8269 mov ss, Gr has just completed successfully. */
8270 if ( fExecuteInhibit
8271 && rcStrict == VINF_SUCCESS
8272 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
8273 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
8274 {
8275 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
8276 if (rcStrict == VINF_SUCCESS)
8277 {
8278 IEM_OPCODE_GET_NEXT_U8(&b);
8279 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8280 if (rcStrict == VINF_SUCCESS)
8281 pIemCpu->cInstructions++;
8282 }
8283 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
8284 }
8285
8286 /*
8287 * Return value fiddling and statistics.
8288 */
8289 if (rcStrict != VINF_SUCCESS)
8290 {
8291 if (RT_SUCCESS(rcStrict))
8292 {
8293 AssertMsg(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8294 int32_t const rcPassUp = pIemCpu->rcPassUp;
8295 if (rcPassUp == VINF_SUCCESS)
8296 pIemCpu->cRetInfStatuses++;
8297 else if ( rcPassUp < VINF_EM_FIRST
8298 || rcPassUp > VINF_EM_LAST
8299 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
8300 {
8301 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8302 pIemCpu->cRetPassUpStatus++;
8303 rcStrict = rcPassUp;
8304 }
8305 else
8306 {
8307 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8308 pIemCpu->cRetInfStatuses++;
8309 }
8310 }
8311 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
8312 pIemCpu->cRetAspectNotImplemented++;
8313 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
8314 pIemCpu->cRetInstrNotImplemented++;
8315#ifdef IEM_VERIFICATION_MODE
8316 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
8317 rcStrict = VINF_SUCCESS;
8318#endif
8319 else
8320 pIemCpu->cRetErrStatuses++;
8321 }
8322 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
8323 {
8324 pIemCpu->cRetPassUpStatus++;
8325 rcStrict = pIemCpu->rcPassUp;
8326 }
8327
8328 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
8329 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
8330#if defined(IEM_VERIFICATION_MODE)
8331 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
8332 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
8333 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
8334 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
8335#endif
8336 return rcStrict;
8337}
8338
8339
8340/**
8341 * Execute one instruction.
8342 *
8343 * @return Strict VBox status code.
8344 * @param pVCpu The current virtual CPU.
8345 */
8346VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
8347{
8348 PIEMCPU pIemCpu = &pVCpu->iem.s;
8349
8350#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
8351 iemExecVerificationModeSetup(pIemCpu);
8352#endif
8353#ifdef LOG_ENABLED
8354 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8355# ifdef IN_RING3
8356 if (LogIs2Enabled())
8357 {
8358 char szInstr[256];
8359 uint32_t cbInstr = 0;
8360 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
8361 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8362 szInstr, sizeof(szInstr), &cbInstr);
8363
8364 Log3(("**** "
8365 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8366 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
8367 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8368 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8369 " %s\n"
8370 ,
8371 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
8372 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
8373 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
8374 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
8375 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
8376 szInstr));
8377
8378 if (LogIs3Enabled())
8379 DBGFR3Info(pVCpu->pVMR3, "cpumguest", "verbose", NULL);
8380 }
8381 else
8382# endif
8383 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
8384 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
8385#endif
8386
8387 /*
8388 * Do the decoding and emulation.
8389 */
8390 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8391 if (rcStrict == VINF_SUCCESS)
8392 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8393
8394#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
8395 /*
8396 * Assert some sanity.
8397 */
8398 iemExecVerificationModeCheck(pIemCpu);
8399#endif
8400 if (rcStrict != VINF_SUCCESS)
8401 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8402 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8403 return rcStrict;
8404}
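
/*
 * Illustrative sketch (not built): a minimal ring-3 driver loop around IEMExecOne,
 * assuming the caller simply re-enters IEM while it keeps returning VINF_SUCCESS and
 * hands any other strict status back to EM.  The function name and the loop bound
 * are hypothetical.
 */
#if 0
static VBOXSTRICTRC emR3IemSketchRunLoop(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (uint32_t cInstr = 0; cInstr < 128 && rcStrict == VINF_SUCCESS; cInstr++)
        rcStrict = IEMExecOne(pVCpu);   /* decodes, executes and retires exactly one instruction */
    return rcStrict;                    /* informational statuses (VINF_EM_*) are for the caller to handle */
}
#endif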
8405
8406
8407VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
8408{
8409 PIEMCPU pIemCpu = &pVCpu->iem.s;
8410 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8411 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8412
8413 iemInitDecoder(pIemCpu, false);
8414 uint32_t const cbOldWritten = pIemCpu->cbWritten;
8415
8416 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8417 if (rcStrict == VINF_SUCCESS)
8418 {
8419 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8420 if (pcbWritten)
8421 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
8422 }
8423 return rcStrict;
8424}
8425
8426
8427VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
8428 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
8429{
8430 PIEMCPU pIemCpu = &pVCpu->iem.s;
8431 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8432 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8433
8434 VBOXSTRICTRC rcStrict;
8435 if ( cbOpcodeBytes
8436 && pCtx->rip == OpcodeBytesPC)
8437 {
8438 iemInitDecoder(pIemCpu, false);
8439 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
8440 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
8441 rcStrict = VINF_SUCCESS;
8442 }
8443 else
8444 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8445 if (rcStrict == VINF_SUCCESS)
8446 {
8447 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8448 }
8449 return rcStrict;
8450}
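
/*
 * Illustrative sketch (not built): when the caller already has the opcode bytes for
 * the current RIP (say, captured at exit time), it can hand them to
 * IEMExecOneWithPrefetchedByPC and skip the opcode prefetch.  The wrapper name and
 * the origin of the byte buffer are assumptions.
 */
#if 0
static VBOXSTRICTRC iemSketchExecWithBytes(PVMCPU pVCpu, const void *pvInstr, size_t cbInstr)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip, pvInstr, cbInstr);
}
#endif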
8451
8452
8453VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
8454{
8455 PIEMCPU pIemCpu = &pVCpu->iem.s;
8456 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8457 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8458
8459 iemInitDecoder(pIemCpu, true);
8460 uint32_t const cbOldWritten = pIemCpu->cbWritten;
8461
8462 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
8463 if (rcStrict == VINF_SUCCESS)
8464 {
8465 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
8466 if (pcbWritten)
8467 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
8468 }
8469 return rcStrict;
8470}
8471
8472
8473VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
8474 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
8475{
8476 PIEMCPU pIemCpu = &pVCpu->iem.s;
8477 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8478 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8479
8480 VBOXSTRICTRC rcStrict;
8481 if ( cbOpcodeBytes
8482 && pCtx->rip == OpcodeBytesPC)
8483 {
8484 iemInitDecoder(pIemCpu, true);
8485 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
8486 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
8487 rcStrict = VINF_SUCCESS;
8488 }
8489 else
8490 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
8491 if (rcStrict == VINF_SUCCESS)
8492 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
8493 return rcStrict;
8494}
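
/*
 * Illustrative sketch (not built): the Bypass variants run the decoder with
 * fBypassHandlers=true, which is what a caller would want when it must not re-enter
 * the access handlers while completing an access itself.  The wrapper and the
 * handler scenario are assumptions; only IEMExecOneBypassEx is taken from this file.
 */
#if 0
static VBOXSTRICTRC iemSketchEmulateBypassingHandlers(PVMCPU pVCpu)
{
    PCPUMCTX     pCtx      = CPUMQueryGuestCtxPtr(pVCpu);
    uint32_t     cbWritten = 0;
    VBOXSTRICTRC rcStrict  = IEMExecOneBypassEx(pVCpu, CPUMCTX2CORE(pCtx), &cbWritten);
    LogFlow(("bypass emulation: rcStrict=%Rrc cbWritten=%#x\n", VBOXSTRICTRC_VAL(rcStrict), cbWritten));
    return rcStrict;
}
#endif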
8495
8496
8497VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
8498{
8499 return IEMExecOne(pVCpu);
8500}
8501
8502
8503
8504/**
8505 * Injects a trap, fault, abort, software interrupt or external interrupt.
8506 *
8507 * The parameter list matches TRPMQueryTrapAll pretty closely.
8508 *
8509 * @returns Strict VBox status code.
8510 * @param pVCpu The current virtual CPU.
8511 * @param u8TrapNo The trap number.
8512 * @param enmType What type is it (trap/fault/abort), software
8513 * interrupt or hardware interrupt.
8514 * @param uErrCode The error code if applicable.
8515 * @param uCr2 The CR2 value if applicable.
8516 */
8517VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
8518{
8519 iemInitDecoder(&pVCpu->iem.s, false);
8520
8521 uint32_t fFlags;
8522 switch (enmType)
8523 {
8524 case TRPM_HARDWARE_INT:
8525 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
8526 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
8527 uErrCode = uCr2 = 0;
8528 break;
8529
8530 case TRPM_SOFTWARE_INT:
8531 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
8532 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
8533 uErrCode = uCr2 = 0;
8534 break;
8535
8536 case TRPM_TRAP:
8537 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
8538 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
8539 if (u8TrapNo == X86_XCPT_PF)
8540 fFlags |= IEM_XCPT_FLAGS_CR2;
8541 switch (u8TrapNo)
8542 {
8543 case X86_XCPT_DF:
8544 case X86_XCPT_TS:
8545 case X86_XCPT_NP:
8546 case X86_XCPT_SS:
8547 case X86_XCPT_PF:
8548 case X86_XCPT_AC:
8549 fFlags |= IEM_XCPT_FLAGS_ERR;
8550 break;
8551 }
8552 break;
8553
8554 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8555 }
8556
8557 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
8558}
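
/*
 * Illustrative sketch (not built): injecting a guest #PF through IEMInjectTrap.  Per
 * the switch above, TRPM_TRAP + X86_XCPT_PF makes both the error code and CR2 flags
 * get set.  The wrapper name and the X86_TRAP_PF_* error code bits are assumptions
 * about definitions living outside this file.
 */
#if 0
static VBOXSTRICTRC iemSketchInjectPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault)
{
    uint16_t const uErrCode = X86_TRAP_PF_P | X86_TRAP_PF_RW;  /* assumed: present page, write access */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault);
}
#endif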
8559
8560
8561VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
8562{
8563 return VERR_NOT_IMPLEMENTED;
8564}
8565
8566
8567VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
8568{
8569 return VERR_NOT_IMPLEMENTED;
8570}
8571
8572
8573#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
8574/**
8575 * Executes a IRET instruction with default operand size.
8576 *
8577 * This is for PATM.
8578 *
8579 * @returns VBox status code.
8580 * @param pVCpu The current virtual CPU.
8581 * @param pCtxCore The register frame.
8582 */
8583VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
8584{
8585 PIEMCPU pIemCpu = &pVCpu->iem.s;
8586 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8587
8588 iemCtxCoreToCtx(pCtx, pCtxCore);
8589 iemInitDecoder(pIemCpu, false);
8590 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
8591 if (rcStrict == VINF_SUCCESS)
8592 iemCtxToCtxCore(pCtxCore, pCtx);
8593 else
8594 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8595 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8596 return rcStrict;
8597}
8598#endif
8599