VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 52478

Last change on this file since 52478 was 52395, checked in by vboxsync, 11 years ago

added todo

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 419.0 KB
1/* $Id: IEMAll.cpp 52395 2014-08-15 22:27:49Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
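 * As a hedged illustration only (the call sites below are made up, not quoted
 * from the decoder), the levels are meant to be used roughly like this:
 * @code
 *     LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip)); // enter/exit info
 *     Log(("iemRaiseXcptOrInt: vector=%#x\n", u8Vector));                      // level 1: exceptions etc.
 *     Log3(("retf: new cs:rip=%04x:%08RX64\n", uNewCs, uNewRip));              // level 3: detailed state
 *     Log4(("decode - %04x:%08RX64 add Ev,Gv\n", pCtx->cs.Sel, pCtx->rip));    // level 4: mnemonics w/ EIP
 *     Log5(("ModRM: mod=%u reg=%u rm=%u\n", bMod, bReg, bRm));                 // level 5: decoding details
 * @endcode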
71 */
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78#define IEM_IMPLEMENTS_TASKSWITCH
79
80/*******************************************************************************
81* Header Files *
82*******************************************************************************/
83#define LOG_GROUP LOG_GROUP_IEM
84#include <VBox/vmm/iem.h>
85#include <VBox/vmm/cpum.h>
86#include <VBox/vmm/pdm.h>
87#include <VBox/vmm/pgm.h>
88#include <internal/pgm.h>
89#include <VBox/vmm/iom.h>
90#include <VBox/vmm/em.h>
91#include <VBox/vmm/hm.h>
92#include <VBox/vmm/tm.h>
93#include <VBox/vmm/dbgf.h>
94#include <VBox/vmm/dbgftrace.h>
95#ifdef VBOX_WITH_RAW_MODE_NOT_R0
96# include <VBox/vmm/patm.h>
97# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
98# include <VBox/vmm/csam.h>
99# endif
100#endif
101#include "IEMInternal.h"
102#ifdef IEM_VERIFICATION_MODE_FULL
103# include <VBox/vmm/rem.h>
104# include <VBox/vmm/mm.h>
105#endif
106#include <VBox/vmm/vm.h>
107#include <VBox/log.h>
108#include <VBox/err.h>
109#include <VBox/param.h>
110#include <VBox/dis.h>
111#include <VBox/disopcode.h>
112#include <iprt/assert.h>
113#include <iprt/string.h>
114#include <iprt/x86.h>
115
116
117
118/*******************************************************************************
119* Structures and Typedefs *
120*******************************************************************************/
121/** @typedef PFNIEMOP
122 * Pointer to an opcode decoder function.
123 */
124
125/** @def FNIEMOP_DEF
126 * Define an opcode decoder function.
127 *
128 * We're using macros for this so that adding and removing parameters as well as
129 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
130 *
131 * @param a_Name The function name.
132 */
133
134
135#if defined(__GNUC__) && defined(RT_ARCH_X86)
136typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
137# define FNIEMOP_DEF(a_Name) \
138 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
139# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
140 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
141# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
142 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
143
144#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
145typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
146# define FNIEMOP_DEF(a_Name) \
147 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
148# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
149 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
150# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
151 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
152
153#elif defined(__GNUC__)
154typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
155# define FNIEMOP_DEF(a_Name) \
156 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#else
163typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
164# define FNIEMOP_DEF(a_Name) \
165 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
170
171#endif
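/*
 * Illustrative sketch only (not part of the build): roughly how the FNIEMOP_DEF
 * and FNIEMOP_CALL families fit together.  The handler and worker names below
 * are made up; the real decoder functions live in IEMAllInstructions.cpp.h.
 *
 *     FNIEMOP_DEF_1(iemOpExample_worker, uint8_t, bRm)
 *     {
 *         NOREF(bRm);
 *         return VINF_SUCCESS;
 *     }
 *
 *     FNIEMOP_DEF(iemOpExample)
 *     {
 *         return FNIEMOP_CALL_1(iemOpExample_worker, 0x3f);
 *     }
 *
 *     // A dispatcher indexes a table of PFNIEMOP pointers and does:
 *     //     return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */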
172
173
174/**
175 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
176 */
177typedef union IEMSELDESC
178{
179 /** The legacy view. */
180 X86DESC Legacy;
181 /** The long mode view. */
182 X86DESC64 Long;
183} IEMSELDESC;
184/** Pointer to a selector descriptor table entry. */
185typedef IEMSELDESC *PIEMSELDESC;
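/*
 * Illustrative sketch only (fragment is made up): code that has fetched a
 * descriptor via iemMemFetchSelDesc typically inspects it through the Legacy
 * view, switching to the Long view for 64-bit system descriptors, e.g.:
 *
 *     IEMSELDESC Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict == VINF_SUCCESS && !Desc.Legacy.Gen.u1DescType)
 *         Log(("%04x is a system descriptor, type %#x\n", uSel, Desc.Legacy.Gen.u4Type));
 */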
186
187
188/*******************************************************************************
189* Defined Constants And Macros *
190*******************************************************************************/
191/** @name IEM status codes.
192 *
193 * Not quite sure how this will play out in the end, just aliasing safe status
194 * codes for now.
195 *
196 * @{ */
197#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
198/** @} */
199
200/** Temporary hack to disable the double execution. Will be removed in favor
201 * of a dedicated execution mode in EM. */
202//#define IEM_VERIFICATION_MODE_NO_REM
203
204/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
205 * due to GCC lacking knowledge about the value range of a switch. */
206#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
207
208/**
209 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
210 * occasion.
211 */
212#ifdef LOG_ENABLED
213# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
214 do { \
215 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
216 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
217 } while (0)
218#else
219# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
220 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
221#endif
222
223/**
224 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
225 * occasion using the supplied logger statement.
226 *
227 * @param a_LoggerArgs What to log on failure.
228 */
229#ifdef LOG_ENABLED
230# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
231 do { \
232 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
233 /*LogFunc(a_LoggerArgs);*/ \
234 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
235 } while (0)
236#else
237# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
238 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
239#endif
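/*
 * Illustrative sketch (made-up stub, not from the real tables): an instruction
 * aspect that IEM does not implement yet would typically bail out like this,
 * so the caller receives VERR_IEM_ASPECT_NOT_IMPLEMENTED and can hand the
 * instruction to another execution engine:
 *
 *     FNIEMOP_DEF(iemOpExample_not_yet_done)
 *     {
 *         IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example: register form not handled\n"));
 *     }
 */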
240
241/**
242 * Call an opcode decoder function.
243 *
244 * We're using macros for this so that adding and removing parameters can be
245 * done as we please. See FNIEMOP_DEF.
246 */
247#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
248
249/**
250 * Call a common opcode decoder function taking one extra argument.
251 *
252 * We're using macros for this so that adding and removing parameters can be
253 * done as we please. See FNIEMOP_DEF_1.
254 */
255#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
256
257/**
258 * Call a common opcode decoder function taking two extra arguments.
259 *
260 * We're using macros for this so that adding and removing parameters can be
261 * done as we please. See FNIEMOP_DEF_2.
262 */
263#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
264
265/**
266 * Check if we're currently executing in real or virtual 8086 mode.
267 *
268 * @returns @c true if it is, @c false if not.
269 * @param a_pIemCpu The IEM state of the current CPU.
270 */
271#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
272
273/**
274 * Check if we're currently executing in virtual 8086 mode.
275 *
276 * @returns @c true if it is, @c false if not.
277 * @param a_pIemCpu The IEM state of the current CPU.
278 */
279#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
280
281/**
282 * Check if we're currently executing in long mode.
283 *
284 * @returns @c true if it is, @c false if not.
285 * @param a_pIemCpu The IEM state of the current CPU.
286 */
287#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
288
289/**
290 * Check if we're currently executing in real mode.
291 *
292 * @returns @c true if it is, @c false if not.
293 * @param a_pIemCpu The IEM state of the current CPU.
294 */
295#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
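/*
 * Illustrative sketch only (handler name and reaction are made up): these mode
 * predicates are meant to be used from the instruction implementations, e.g.
 * to reject an instruction that is only valid in protected mode:
 *
 *     FNIEMOP_DEF(iemOpExample_prot_mode_only)
 *     {
 *         if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
 *             return iemRaiseGeneralProtectionFault0(pIemCpu);
 *         return VINF_SUCCESS;
 *     }
 */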
296
297/**
298 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
299 */
300#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
301
302/**
303 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
304 */
305#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
306
307/**
308 * Tests if at least one of the specified AMD CPUID features (extended) is
309 * marked present.
310 */
311#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
312
313/**
314 * Checks if an Intel CPUID feature is present.
315 */
316#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
317 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
318 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
319
320/**
321 * Checks if an Intel CPUID feature is present.
322 */
323#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx) \
324 ( iemRegIsIntelCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx)) )
325
326/**
327 * Checks if an Intel CPUID feature is present in the host CPU.
328 */
329#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) \
330 ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx )
331
332/**
333 * Evaluates to true if we're presenting an Intel CPU to the guest.
334 */
335#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
336
337/**
338 * Evaluates to true if we're presenting an AMD CPU to the guest.
339 */
340#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
341
342/**
343 * Check if the address is canonical.
344 */
345#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
346
347
348/*******************************************************************************
349* Global Variables *
350*******************************************************************************/
351extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
352
353
354/** Function table for the ADD instruction. */
355static const IEMOPBINSIZES g_iemAImpl_add =
356{
357 iemAImpl_add_u8, iemAImpl_add_u8_locked,
358 iemAImpl_add_u16, iemAImpl_add_u16_locked,
359 iemAImpl_add_u32, iemAImpl_add_u32_locked,
360 iemAImpl_add_u64, iemAImpl_add_u64_locked
361};
362
363/** Function table for the ADC instruction. */
364static const IEMOPBINSIZES g_iemAImpl_adc =
365{
366 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
367 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
368 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
369 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
370};
371
372/** Function table for the SUB instruction. */
373static const IEMOPBINSIZES g_iemAImpl_sub =
374{
375 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
376 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
377 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
378 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
379};
380
381/** Function table for the SBB instruction. */
382static const IEMOPBINSIZES g_iemAImpl_sbb =
383{
384 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
385 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
386 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
387 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
388};
389
390/** Function table for the OR instruction. */
391static const IEMOPBINSIZES g_iemAImpl_or =
392{
393 iemAImpl_or_u8, iemAImpl_or_u8_locked,
394 iemAImpl_or_u16, iemAImpl_or_u16_locked,
395 iemAImpl_or_u32, iemAImpl_or_u32_locked,
396 iemAImpl_or_u64, iemAImpl_or_u64_locked
397};
398
399/** Function table for the XOR instruction. */
400static const IEMOPBINSIZES g_iemAImpl_xor =
401{
402 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
403 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
404 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
405 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
406};
407
408/** Function table for the AND instruction. */
409static const IEMOPBINSIZES g_iemAImpl_and =
410{
411 iemAImpl_and_u8, iemAImpl_and_u8_locked,
412 iemAImpl_and_u16, iemAImpl_and_u16_locked,
413 iemAImpl_and_u32, iemAImpl_and_u32_locked,
414 iemAImpl_and_u64, iemAImpl_and_u64_locked
415};
416
417/** Function table for the CMP instruction.
418 * @remarks Making operand order ASSUMPTIONS.
419 */
420static const IEMOPBINSIZES g_iemAImpl_cmp =
421{
422 iemAImpl_cmp_u8, NULL,
423 iemAImpl_cmp_u16, NULL,
424 iemAImpl_cmp_u32, NULL,
425 iemAImpl_cmp_u64, NULL
426};
427
428/** Function table for the TEST instruction.
429 * @remarks Making operand order ASSUMPTIONS.
430 */
431static const IEMOPBINSIZES g_iemAImpl_test =
432{
433 iemAImpl_test_u8, NULL,
434 iemAImpl_test_u16, NULL,
435 iemAImpl_test_u32, NULL,
436 iemAImpl_test_u64, NULL
437};
438
439/** Function table for the BT instruction. */
440static const IEMOPBINSIZES g_iemAImpl_bt =
441{
442 NULL, NULL,
443 iemAImpl_bt_u16, NULL,
444 iemAImpl_bt_u32, NULL,
445 iemAImpl_bt_u64, NULL
446};
447
448/** Function table for the BTC instruction. */
449static const IEMOPBINSIZES g_iemAImpl_btc =
450{
451 NULL, NULL,
452 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
453 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
454 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
455};
456
457/** Function table for the BTR instruction. */
458static const IEMOPBINSIZES g_iemAImpl_btr =
459{
460 NULL, NULL,
461 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
462 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
463 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
464};
465
466/** Function table for the BTS instruction. */
467static const IEMOPBINSIZES g_iemAImpl_bts =
468{
469 NULL, NULL,
470 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
471 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
472 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
473};
474
475/** Function table for the BSF instruction. */
476static const IEMOPBINSIZES g_iemAImpl_bsf =
477{
478 NULL, NULL,
479 iemAImpl_bsf_u16, NULL,
480 iemAImpl_bsf_u32, NULL,
481 iemAImpl_bsf_u64, NULL
482};
483
484/** Function table for the BSR instruction. */
485static const IEMOPBINSIZES g_iemAImpl_bsr =
486{
487 NULL, NULL,
488 iemAImpl_bsr_u16, NULL,
489 iemAImpl_bsr_u32, NULL,
490 iemAImpl_bsr_u64, NULL
491};
492
494/** Function table for the two-operand form of the IMUL instruction. */
494static const IEMOPBINSIZES g_iemAImpl_imul_two =
495{
496 NULL, NULL,
497 iemAImpl_imul_two_u16, NULL,
498 iemAImpl_imul_two_u32, NULL,
499 iemAImpl_imul_two_u64, NULL
500};
501
502/** Group 1 /r lookup table. */
503static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
504{
505 &g_iemAImpl_add,
506 &g_iemAImpl_or,
507 &g_iemAImpl_adc,
508 &g_iemAImpl_sbb,
509 &g_iemAImpl_and,
510 &g_iemAImpl_sub,
511 &g_iemAImpl_xor,
512 &g_iemAImpl_cmp
513};
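/*
 * Illustrative sketch only (not an excerpt): the 0x80-0x83 decoders are
 * expected to pick the operation from the table above using the ModR/M reg
 * field, with the effective operand size then selecting the worker inside the
 * IEMOPBINSIZES entry:
 *
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_U8(&bRm);
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */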
514
515/** Function table for the INC instruction. */
516static const IEMOPUNARYSIZES g_iemAImpl_inc =
517{
518 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
519 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
520 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
521 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
522};
523
524/** Function table for the DEC instruction. */
525static const IEMOPUNARYSIZES g_iemAImpl_dec =
526{
527 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
528 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
529 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
530 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
531};
532
533/** Function table for the NEG instruction. */
534static const IEMOPUNARYSIZES g_iemAImpl_neg =
535{
536 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
537 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
538 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
539 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
540};
541
542/** Function table for the NOT instruction. */
543static const IEMOPUNARYSIZES g_iemAImpl_not =
544{
545 iemAImpl_not_u8, iemAImpl_not_u8_locked,
546 iemAImpl_not_u16, iemAImpl_not_u16_locked,
547 iemAImpl_not_u32, iemAImpl_not_u32_locked,
548 iemAImpl_not_u64, iemAImpl_not_u64_locked
549};
550
551
552/** Function table for the ROL instruction. */
553static const IEMOPSHIFTSIZES g_iemAImpl_rol =
554{
555 iemAImpl_rol_u8,
556 iemAImpl_rol_u16,
557 iemAImpl_rol_u32,
558 iemAImpl_rol_u64
559};
560
561/** Function table for the ROR instruction. */
562static const IEMOPSHIFTSIZES g_iemAImpl_ror =
563{
564 iemAImpl_ror_u8,
565 iemAImpl_ror_u16,
566 iemAImpl_ror_u32,
567 iemAImpl_ror_u64
568};
569
570/** Function table for the RCL instruction. */
571static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
572{
573 iemAImpl_rcl_u8,
574 iemAImpl_rcl_u16,
575 iemAImpl_rcl_u32,
576 iemAImpl_rcl_u64
577};
578
579/** Function table for the RCR instruction. */
580static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
581{
582 iemAImpl_rcr_u8,
583 iemAImpl_rcr_u16,
584 iemAImpl_rcr_u32,
585 iemAImpl_rcr_u64
586};
587
588/** Function table for the SHL instruction. */
589static const IEMOPSHIFTSIZES g_iemAImpl_shl =
590{
591 iemAImpl_shl_u8,
592 iemAImpl_shl_u16,
593 iemAImpl_shl_u32,
594 iemAImpl_shl_u64
595};
596
597/** Function table for the SHR instruction. */
598static const IEMOPSHIFTSIZES g_iemAImpl_shr =
599{
600 iemAImpl_shr_u8,
601 iemAImpl_shr_u16,
602 iemAImpl_shr_u32,
603 iemAImpl_shr_u64
604};
605
606/** Function table for the SAR instruction. */
607static const IEMOPSHIFTSIZES g_iemAImpl_sar =
608{
609 iemAImpl_sar_u8,
610 iemAImpl_sar_u16,
611 iemAImpl_sar_u32,
612 iemAImpl_sar_u64
613};
614
615
616/** Function table for the MUL instruction. */
617static const IEMOPMULDIVSIZES g_iemAImpl_mul =
618{
619 iemAImpl_mul_u8,
620 iemAImpl_mul_u16,
621 iemAImpl_mul_u32,
622 iemAImpl_mul_u64
623};
624
625/** Function table for the IMUL instruction working implicitly on rAX. */
626static const IEMOPMULDIVSIZES g_iemAImpl_imul =
627{
628 iemAImpl_imul_u8,
629 iemAImpl_imul_u16,
630 iemAImpl_imul_u32,
631 iemAImpl_imul_u64
632};
633
634/** Function table for the DIV instruction. */
635static const IEMOPMULDIVSIZES g_iemAImpl_div =
636{
637 iemAImpl_div_u8,
638 iemAImpl_div_u16,
639 iemAImpl_div_u32,
640 iemAImpl_div_u64
641};
642
643/** Function table for the IDIV instruction. */
644static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
645{
646 iemAImpl_idiv_u8,
647 iemAImpl_idiv_u16,
648 iemAImpl_idiv_u32,
649 iemAImpl_idiv_u64
650};
651
652/** Function table for the SHLD instruction */
653static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
654{
655 iemAImpl_shld_u16,
656 iemAImpl_shld_u32,
657 iemAImpl_shld_u64,
658};
659
660/** Function table for the SHRD instruction */
661static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
662{
663 iemAImpl_shrd_u16,
664 iemAImpl_shrd_u32,
665 iemAImpl_shrd_u64,
666};
667
668
669/** Function table for the PUNPCKLBW instruction */
670static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
671/** Function table for the PUNPCKLWD instruction */
672static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
673/** Function table for the PUNPCKLDQ instruction */
674static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
675/** Function table for the PUNPCKLQDQ instruction */
676static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
677
678/** Function table for the PUNPCKHBW instruction */
679static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
680/** Function table for the PUNPCKHWD instruction */
681static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
682/** Function table for the PUNPCKHDQ instruction */
683static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
684/** Function table for the PUNPCKHQDQ instruction */
685static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
686
687/** Function table for the PXOR instruction */
688static const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
689/** Function table for the PCMPEQB instruction */
690static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
691/** Function table for the PCMPEQW instruction */
692static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
693/** Function table for the PCMPEQD instruction */
694static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
695
696
697#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
698/** What IEM just wrote. */
699uint8_t g_abIemWrote[256];
700/** How much IEM just wrote. */
701size_t g_cbIemWrote;
702#endif
703
704
705/*******************************************************************************
706* Internal Functions *
707*******************************************************************************/
708static VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
709static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
710static VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
711static VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
712/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
713static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
714static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
715static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
716static VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
717static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
718static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
719static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
720static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
721static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
722static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
723static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
724static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
725static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
726static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
727static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
728static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
729static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
730static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
731static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
732static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
733static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
734static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
735static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
736static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
737static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
738static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
739static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
740static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
741
742#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
743static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
744#endif
745static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
746static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
747
748
749
750/**
751 * Sets the pass up status.
752 *
753 * @returns VINF_SUCCESS.
754 * @param pIemCpu The per CPU IEM state of the calling thread.
755 * @param rcPassUp The pass up status. Must be informational.
756 * VINF_SUCCESS is not allowed.
757 */
758static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
759{
760 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
761
762 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
763 if (rcOldPassUp == VINF_SUCCESS)
764 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
765 /* If both are EM scheduling codes, use EM priority rules. */
766 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
767 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
768 {
769 if (rcPassUp < rcOldPassUp)
770 {
771 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
772 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
773 }
774 else
775 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
776 }
777 /* Override EM scheduling with specific status code. */
778 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
779 {
780 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
781 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
782 }
783 /* Don't override specific status code, first come first served. */
784 else
785 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
786 return VINF_SUCCESS;
787}
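/*
 * Illustrative sketch of the intended calling pattern (not an excerpt): when a
 * helper returns an informational status that should not abort the current
 * instruction, the caller records it and continues with VINF_SUCCESS, letting
 * the final status fiddling merge it back in when the instruction completes:
 *
 *     VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM, GCPhys, pvBuf, cbToWrite);
 *     if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(VBOXSTRICTRC_VAL(rcStrict2)))
 *         rcStrict2 = iemSetPassUpStatus(pIemCpu, rcStrict2); // always VINF_SUCCESS
 */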
788
789
790/**
791 * Initializes the execution state.
792 *
793 * @param pIemCpu The per CPU IEM state.
794 * @param fBypassHandlers Whether to bypass access handlers.
795 */
796DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
797{
798 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
799 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
800
801#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
808 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
809 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
810#endif
811
812#ifdef VBOX_WITH_RAW_MODE_NOT_R0
813 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
814#endif
815 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
816 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
817 ? IEMMODE_64BIT
818 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
819 ? IEMMODE_32BIT
820 : IEMMODE_16BIT;
821 pIemCpu->enmCpuMode = enmMode;
822#ifdef VBOX_STRICT
823 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
824 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
825 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
826 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
827 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
828 pIemCpu->uRexReg = 127;
829 pIemCpu->uRexB = 127;
830 pIemCpu->uRexIndex = 127;
831 pIemCpu->iEffSeg = 127;
832 pIemCpu->offOpcode = 127;
833 pIemCpu->cbOpcode = 127;
834#endif
835
836 pIemCpu->cActiveMappings = 0;
837 pIemCpu->iNextMapping = 0;
838 pIemCpu->rcPassUp = VINF_SUCCESS;
839 pIemCpu->fBypassHandlers = fBypassHandlers;
840#ifdef VBOX_WITH_RAW_MODE_NOT_R0
841 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
842 && pCtx->cs.u64Base == 0
843 && pCtx->cs.u32Limit == UINT32_MAX
844 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
845 if (!pIemCpu->fInPatchCode)
846 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
847#endif
848}
849
850
851/**
852 * Initializes the decoder state.
853 *
854 * @param pIemCpu The per CPU IEM state.
855 * @param fBypassHandlers Whether to bypass access handlers.
856 */
857DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
858{
859 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
860 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
861
862#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
863 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
864 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
865 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
866 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
867 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
868 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
869 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
870 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
871#endif
872
873#ifdef VBOX_WITH_RAW_MODE_NOT_R0
874 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
875#endif
876 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
877#ifdef IEM_VERIFICATION_MODE_FULL
878 if (pIemCpu->uInjectCpl != UINT8_MAX)
879 pIemCpu->uCpl = pIemCpu->uInjectCpl;
880#endif
881 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
882 ? IEMMODE_64BIT
883 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
884 ? IEMMODE_32BIT
885 : IEMMODE_16BIT;
886 pIemCpu->enmCpuMode = enmMode;
887 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
888 pIemCpu->enmEffAddrMode = enmMode;
889 if (enmMode != IEMMODE_64BIT)
890 {
891 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
892 pIemCpu->enmEffOpSize = enmMode;
893 }
894 else
895 {
896 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
897 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
898 }
899 pIemCpu->fPrefixes = 0;
900 pIemCpu->uRexReg = 0;
901 pIemCpu->uRexB = 0;
902 pIemCpu->uRexIndex = 0;
903 pIemCpu->iEffSeg = X86_SREG_DS;
904 pIemCpu->offOpcode = 0;
905 pIemCpu->cbOpcode = 0;
906 pIemCpu->cActiveMappings = 0;
907 pIemCpu->iNextMapping = 0;
908 pIemCpu->rcPassUp = VINF_SUCCESS;
909 pIemCpu->fBypassHandlers = fBypassHandlers;
910#ifdef VBOX_WITH_RAW_MODE_NOT_R0
911 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
912 && pCtx->cs.u64Base == 0
913 && pCtx->cs.u32Limit == UINT32_MAX
914 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
915 if (!pIemCpu->fInPatchCode)
916 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
917#endif
918
919#ifdef DBGFTRACE_ENABLED
920 switch (enmMode)
921 {
922 case IEMMODE_64BIT:
923 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
924 break;
925 case IEMMODE_32BIT:
926 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
927 break;
928 case IEMMODE_16BIT:
929 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
930 break;
931 }
932#endif
933}
934
935
936/**
937 * Prefetches opcodes when execution is started for the first time.
938 *
939 * @returns Strict VBox status code.
940 * @param pIemCpu The IEM state.
941 * @param fBypassHandlers Whether to bypass access handlers.
942 */
943static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
944{
945#ifdef IEM_VERIFICATION_MODE_FULL
946 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
947#endif
948 iemInitDecoder(pIemCpu, fBypassHandlers);
949
950 /*
951 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
952 *
953 * First translate CS:rIP to a physical address.
954 */
955 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
956 uint32_t cbToTryRead;
957 RTGCPTR GCPtrPC;
958 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
959 {
960 cbToTryRead = PAGE_SIZE;
961 GCPtrPC = pCtx->rip;
962 if (!IEM_IS_CANONICAL(GCPtrPC))
963 return iemRaiseGeneralProtectionFault0(pIemCpu);
964 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
965 }
966 else
967 {
968 uint32_t GCPtrPC32 = pCtx->eip;
969 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
970 if (GCPtrPC32 > pCtx->cs.u32Limit)
971 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
972 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
973 if (!cbToTryRead) /* overflowed */
974 {
975 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
976 cbToTryRead = UINT32_MAX;
977 }
978 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
979 Assert(GCPtrPC <= UINT32_MAX);
980 }
981
982#ifdef VBOX_WITH_RAW_MODE_NOT_R0
983 /* Allow interpretation of patch manager code blocks since they can for
984 instance throw #PFs for perfectly good reasons. */
985 if (pIemCpu->fInPatchCode)
986 {
987 size_t cbRead = 0;
988 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
989 AssertRCReturn(rc, rc);
990 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
991 return VINF_SUCCESS;
992 }
993#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
994
995 RTGCPHYS GCPhys;
996 uint64_t fFlags;
997 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
998 if (RT_FAILURE(rc))
999 {
1000 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1001 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1002 }
1003 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1004 {
1005 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1006 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1007 }
1008 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1009 {
1010 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1011 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1012 }
1013 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1014 /** @todo Check reserved bits and such stuff. PGM is better at doing
1015 * that, so do it when implementing the guest virtual address
1016 * TLB... */
1017
1018#ifdef IEM_VERIFICATION_MODE_FULL
1019 /*
1020 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1021 * instruction.
1022 */
1023 /** @todo optimize this differently by not using PGMPhysRead. */
1024 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1025 pIemCpu->GCPhysOpcodes = GCPhys;
1026 if ( offPrevOpcodes < cbOldOpcodes
1027 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1028 {
1029 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1030 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1031 pIemCpu->cbOpcode = cbNew;
1032 return VINF_SUCCESS;
1033 }
1034#endif
1035
1036 /*
1037 * Read the bytes at this address.
1038 */
1039 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1040#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1041 size_t cbActual;
1042 if ( PATMIsEnabled(pVM)
1043 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1044 {
1045 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1046 Assert(cbActual > 0);
1047 pIemCpu->cbOpcode = (uint8_t)cbActual;
1048 }
1049 else
1050#endif
1051 {
1052 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1053 if (cbToTryRead > cbLeftOnPage)
1054 cbToTryRead = cbLeftOnPage;
1055 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1056 cbToTryRead = sizeof(pIemCpu->abOpcode);
1057
1058 if (!pIemCpu->fBypassHandlers)
1059 rc = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead);
1060 else
1061 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1062 if (rc != VINF_SUCCESS)
1063 {
1064 /** @todo status code handling */
1065 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1066 GCPtrPC, GCPhys, rc, cbToTryRead));
1067 return rc;
1068 }
1069 pIemCpu->cbOpcode = cbToTryRead;
1070 }
1071
1072 return VINF_SUCCESS;
1073}
1074
1075
1076/**
1077 * Tries to fetch at least @a cbMin bytes more opcodes, raising the appropriate
1078 * exception if it fails.
1079 *
1080 * @returns Strict VBox status code.
1081 * @param pIemCpu The IEM state.
1082 * @param cbMin The minimum number of bytes, relative to offOpcode,
1083 * that must be read.
1084 */
1085static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1086{
1087 /*
1088 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1089 *
1090 * First translate CS:rIP to a physical address.
1091 */
1092 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1093 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1094 uint32_t cbToTryRead;
1095 RTGCPTR GCPtrNext;
1096 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1097 {
1098 cbToTryRead = PAGE_SIZE;
1099 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1100 if (!IEM_IS_CANONICAL(GCPtrNext))
1101 return iemRaiseGeneralProtectionFault0(pIemCpu);
1102 }
1103 else
1104 {
1105 uint32_t GCPtrNext32 = pCtx->eip;
1106 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1107 GCPtrNext32 += pIemCpu->cbOpcode;
1108 if (GCPtrNext32 > pCtx->cs.u32Limit)
1109 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1110 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1111 if (!cbToTryRead) /* overflowed */
1112 {
1113 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1114 cbToTryRead = UINT32_MAX;
1115 /** @todo check out wrapping around the code segment. */
1116 }
1117 if (cbToTryRead < cbMin - cbLeft)
1118 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1119 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1120 }
1121
1122 /* Only read up to the end of the page, and make sure we don't read more
1123 than the opcode buffer can hold. */
1124 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1125 if (cbToTryRead > cbLeftOnPage)
1126 cbToTryRead = cbLeftOnPage;
1127 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1128 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1129 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1130
1131#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1132 /* Allow interpretation of patch manager code blocks since they can for
1133 instance throw #PFs for perfectly good reasons. */
1134 if (pIemCpu->fInPatchCode)
1135 {
1136 size_t cbRead = 0;
1137 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1138 AssertRCReturn(rc, rc);
1139 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1140 return VINF_SUCCESS;
1141 }
1142#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1143
1144 RTGCPHYS GCPhys;
1145 uint64_t fFlags;
1146 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1147 if (RT_FAILURE(rc))
1148 {
1149 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1150 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1151 }
1152 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1153 {
1154 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1155 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1156 }
1157 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1158 {
1159 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1160 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1161 }
1162 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1163 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1164 /** @todo Check reserved bits and such stuff. PGM is better at doing
1165 * that, so do it when implementing the guest virtual address
1166 * TLB... */
1167
1168 /*
1169 * Read the bytes at this address.
1170 *
1171 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1172 * and since PATM should only patch the start of an instruction there
1173 * should be no need to check again here.
1174 */
1175 if (!pIemCpu->fBypassHandlers)
1176 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
1177 else
1178 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1179 if (rc != VINF_SUCCESS)
1180 {
1181 /** @todo status code handling */
1182 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1183 return rc;
1184 }
1185 pIemCpu->cbOpcode += cbToTryRead;
1186 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1187
1188 return VINF_SUCCESS;
1189}
1190
1191
1192/**
1193 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1194 *
1195 * @returns Strict VBox status code.
1196 * @param pIemCpu The IEM state.
1197 * @param pb Where to return the opcode byte.
1198 */
1199DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1200{
1201 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1202 if (rcStrict == VINF_SUCCESS)
1203 {
1204 uint8_t offOpcode = pIemCpu->offOpcode;
1205 *pb = pIemCpu->abOpcode[offOpcode];
1206 pIemCpu->offOpcode = offOpcode + 1;
1207 }
1208 else
1209 *pb = 0;
1210 return rcStrict;
1211}
1212
1213
1214/**
1215 * Fetches the next opcode byte.
1216 *
1217 * @returns Strict VBox status code.
1218 * @param pIemCpu The IEM state.
1219 * @param pu8 Where to return the opcode byte.
1220 */
1221DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1222{
1223 uint8_t const offOpcode = pIemCpu->offOpcode;
1224 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1225 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1226
1227 *pu8 = pIemCpu->abOpcode[offOpcode];
1228 pIemCpu->offOpcode = offOpcode + 1;
1229 return VINF_SUCCESS;
1230}
1231
1232
1233/**
1234 * Fetches the next opcode byte, returns automatically on failure.
1235 *
1236 * @param a_pu8 Where to return the opcode byte.
1237 * @remark Implicitly references pIemCpu.
1238 */
1239#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1240 do \
1241 { \
1242 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1243 if (rcStrict2 != VINF_SUCCESS) \
1244 return rcStrict2; \
1245 } while (0)
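/*
 * Illustrative sketch (handler and worker names are made up): because the
 * IEM_OPCODE_GET_NEXT_* macros return from the enclosing function on fetch
 * failure, they can only be used inside functions returning VBOXSTRICTRC,
 * typically the FNIEMOP_DEF style decoders:
 *
 *     FNIEMOP_DEF(iemOpExample_with_modrm)
 *     {
 *         uint8_t bRm;
 *         IEM_OPCODE_GET_NEXT_U8(&bRm); // may 'return rcStrict2' behind our back
 *         return FNIEMOP_CALL_1(iemOpExample_modrm_worker, bRm);
 *     }
 */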
1246
1247
1248/**
1249 * Fetches the next signed byte from the opcode stream.
1250 *
1251 * @returns Strict VBox status code.
1252 * @param pIemCpu The IEM state.
1253 * @param pi8 Where to return the signed byte.
1254 */
1255DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1256{
1257 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1258}
1259
1260
1261/**
1262 * Fetches the next signed byte from the opcode stream, returning automatically
1263 * on failure.
1264 *
1265 * @param pi8 Where to return the signed byte.
1266 * @remark Implicitly references pIemCpu.
1267 */
1268#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1269 do \
1270 { \
1271 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1272 if (rcStrict2 != VINF_SUCCESS) \
1273 return rcStrict2; \
1274 } while (0)
1275
1276
1277/**
1278 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1279 *
1280 * @returns Strict VBox status code.
1281 * @param pIemCpu The IEM state.
1282 * @param pu16 Where to return the opcode word.
1283 */
1284DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1285{
1286 uint8_t u8;
1287 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1288 if (rcStrict == VINF_SUCCESS)
1289 *pu16 = (int8_t)u8;
1290 return rcStrict;
1291}
1292
1293
1294/**
1295 * Fetches the next signed byte from the opcode stream, extending it to
1296 * unsigned 16-bit.
1297 *
1298 * @returns Strict VBox status code.
1299 * @param pIemCpu The IEM state.
1300 * @param pu16 Where to return the unsigned word.
1301 */
1302DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1303{
1304 uint8_t const offOpcode = pIemCpu->offOpcode;
1305 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1306 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1307
1308 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1309 pIemCpu->offOpcode = offOpcode + 1;
1310 return VINF_SUCCESS;
1311}
1312
1313
1314/**
1315 * Fetches the next signed byte from the opcode stream, sign-extending it to
1316 * a word, returning automatically on failure.
1317 *
1318 * @param pu16 Where to return the word.
1319 * @remark Implicitly references pIemCpu.
1320 */
1321#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1322 do \
1323 { \
1324 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1325 if (rcStrict2 != VINF_SUCCESS) \
1326 return rcStrict2; \
1327 } while (0)
1328
1329
1330/**
1331 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1332 *
1333 * @returns Strict VBox status code.
1334 * @param pIemCpu The IEM state.
1335 * @param pu32 Where to return the opcode dword.
1336 */
1337DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1338{
1339 uint8_t u8;
1340 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1341 if (rcStrict == VINF_SUCCESS)
1342 *pu32 = (int8_t)u8;
1343 return rcStrict;
1344}
1345
1346
1347/**
1348 * Fetches the next signed byte from the opcode stream, extending it to
1349 * unsigned 32-bit.
1350 *
1351 * @returns Strict VBox status code.
1352 * @param pIemCpu The IEM state.
1353 * @param pu32 Where to return the unsigned dword.
1354 */
1355DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1356{
1357 uint8_t const offOpcode = pIemCpu->offOpcode;
1358 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1359 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1360
1361 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1362 pIemCpu->offOpcode = offOpcode + 1;
1363 return VINF_SUCCESS;
1364}
1365
1366
1367/**
1368 * Fetches the next signed byte from the opcode stream, sign-extending it to
1369 * a double word, returning automatically on failure.
1370 *
1371 * @param pu32 Where to return the double word.
1372 * @remark Implicitly references pIemCpu.
1373 */
1374#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1375 do \
1376 { \
1377 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1378 if (rcStrict2 != VINF_SUCCESS) \
1379 return rcStrict2; \
1380 } while (0)
1381
1382
1383/**
1384 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1385 *
1386 * @returns Strict VBox status code.
1387 * @param pIemCpu The IEM state.
1388 * @param pu64 Where to return the opcode qword.
1389 */
1390DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1391{
1392 uint8_t u8;
1393 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1394 if (rcStrict == VINF_SUCCESS)
1395 *pu64 = (int8_t)u8;
1396 return rcStrict;
1397}
1398
1399
1400/**
1401 * Fetches the next signed byte from the opcode stream, extending it to
1402 * unsigned 64-bit.
1403 *
1404 * @returns Strict VBox status code.
1405 * @param pIemCpu The IEM state.
1406 * @param pu64 Where to return the unsigned qword.
1407 */
1408DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1409{
1410 uint8_t const offOpcode = pIemCpu->offOpcode;
1411 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1412 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1413
1414 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1415 pIemCpu->offOpcode = offOpcode + 1;
1416 return VINF_SUCCESS;
1417}
1418
1419
1420/**
1421 * Fetches the next signed byte from the opcode stream, sign-extending it to
1422 * a quad word, returning automatically on failure.
1423 *
1424 * @param pu64 Where to return the quad word.
1425 * @remark Implicitly references pIemCpu.
1426 */
1427#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1428 do \
1429 { \
1430 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1431 if (rcStrict2 != VINF_SUCCESS) \
1432 return rcStrict2; \
1433 } while (0)
1434
1435
1436/**
1437 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1438 *
1439 * @returns Strict VBox status code.
1440 * @param pIemCpu The IEM state.
1441 * @param pu16 Where to return the opcode word.
1442 */
1443DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1444{
1445 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1446 if (rcStrict == VINF_SUCCESS)
1447 {
1448 uint8_t offOpcode = pIemCpu->offOpcode;
1449 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1450 pIemCpu->offOpcode = offOpcode + 2;
1451 }
1452 else
1453 *pu16 = 0;
1454 return rcStrict;
1455}
1456
1457
1458/**
1459 * Fetches the next opcode word.
1460 *
1461 * @returns Strict VBox status code.
1462 * @param pIemCpu The IEM state.
1463 * @param pu16 Where to return the opcode word.
1464 */
1465DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1466{
1467 uint8_t const offOpcode = pIemCpu->offOpcode;
1468 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1469 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1470
1471 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1472 pIemCpu->offOpcode = offOpcode + 2;
1473 return VINF_SUCCESS;
1474}
1475
1476
1477/**
1478 * Fetches the next opcode word, returns automatically on failure.
1479 *
1480 * @param a_pu16 Where to return the opcode word.
1481 * @remark Implicitly references pIemCpu.
1482 */
1483#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1484 do \
1485 { \
1486 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1487 if (rcStrict2 != VINF_SUCCESS) \
1488 return rcStrict2; \
1489 } while (0)
1490
1491
1492/**
1493 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1494 *
1495 * @returns Strict VBox status code.
1496 * @param pIemCpu The IEM state.
1497 * @param pu32 Where to return the opcode double word.
1498 */
1499DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1500{
1501 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1502 if (rcStrict == VINF_SUCCESS)
1503 {
1504 uint8_t offOpcode = pIemCpu->offOpcode;
1505 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1506 pIemCpu->offOpcode = offOpcode + 2;
1507 }
1508 else
1509 *pu32 = 0;
1510 return rcStrict;
1511}
1512
1513
1514/**
1515 * Fetches the next opcode word, zero extending it to a double word.
1516 *
1517 * @returns Strict VBox status code.
1518 * @param pIemCpu The IEM state.
1519 * @param pu32 Where to return the opcode double word.
1520 */
1521DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1522{
1523 uint8_t const offOpcode = pIemCpu->offOpcode;
1524 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1525 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1526
1527 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1528 pIemCpu->offOpcode = offOpcode + 2;
1529 return VINF_SUCCESS;
1530}
1531
1532
1533/**
1534 * Fetches the next opcode word and zero extends it to a double word, returns
1535 * automatically on failure.
1536 *
1537 * @param a_pu32 Where to return the opcode double word.
1538 * @remark Implicitly references pIemCpu.
1539 */
1540#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1541 do \
1542 { \
1543 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1544 if (rcStrict2 != VINF_SUCCESS) \
1545 return rcStrict2; \
1546 } while (0)
1547
1548
1549/**
1550 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1551 *
1552 * @returns Strict VBox status code.
1553 * @param pIemCpu The IEM state.
1554 * @param pu64 Where to return the opcode quad word.
1555 */
1556DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1557{
1558 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1559 if (rcStrict == VINF_SUCCESS)
1560 {
1561 uint8_t offOpcode = pIemCpu->offOpcode;
1562 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1563 pIemCpu->offOpcode = offOpcode + 2;
1564 }
1565 else
1566 *pu64 = 0;
1567 return rcStrict;
1568}
1569
1570
1571/**
1572 * Fetches the next opcode word, zero extending it to a quad word.
1573 *
1574 * @returns Strict VBox status code.
1575 * @param pIemCpu The IEM state.
1576 * @param pu64 Where to return the opcode quad word.
1577 */
1578DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1579{
1580 uint8_t const offOpcode = pIemCpu->offOpcode;
1581 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1582 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1583
1584 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1585 pIemCpu->offOpcode = offOpcode + 2;
1586 return VINF_SUCCESS;
1587}
1588
1589
1590/**
1591 * Fetches the next opcode word and zero extends it to a quad word, returns
1592 * automatically on failure.
1593 *
1594 * @param a_pu64 Where to return the opcode quad word.
1595 * @remark Implicitly references pIemCpu.
1596 */
1597#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1598 do \
1599 { \
1600 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1601 if (rcStrict2 != VINF_SUCCESS) \
1602 return rcStrict2; \
1603 } while (0)
1604
1605
1606/**
1607 * Fetches the next signed word from the opcode stream.
1608 *
1609 * @returns Strict VBox status code.
1610 * @param pIemCpu The IEM state.
1611 * @param pi16 Where to return the signed word.
1612 */
1613DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1614{
1615 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1616}
1617
1618
1619/**
1620 * Fetches the next signed word from the opcode stream, returning automatically
1621 * on failure.
1622 *
1623 * @param a_pi16 Where to return the signed word.
1624 * @remark Implicitly references pIemCpu.
1625 */
1626#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1627 do \
1628 { \
1629 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1630 if (rcStrict2 != VINF_SUCCESS) \
1631 return rcStrict2; \
1632 } while (0)
1633
1634
1635/**
1636 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1637 *
1638 * @returns Strict VBox status code.
1639 * @param pIemCpu The IEM state.
1640 * @param pu32 Where to return the opcode dword.
1641 */
1642DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1643{
1644 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1645 if (rcStrict == VINF_SUCCESS)
1646 {
1647 uint8_t offOpcode = pIemCpu->offOpcode;
1648 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1649 pIemCpu->abOpcode[offOpcode + 1],
1650 pIemCpu->abOpcode[offOpcode + 2],
1651 pIemCpu->abOpcode[offOpcode + 3]);
1652 pIemCpu->offOpcode = offOpcode + 4;
1653 }
1654 else
1655 *pu32 = 0;
1656 return rcStrict;
1657}
1658
1659
1660/**
1661 * Fetches the next opcode dword.
1662 *
1663 * @returns Strict VBox status code.
1664 * @param pIemCpu The IEM state.
1665 * @param pu32 Where to return the opcode double word.
1666 */
1667DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1668{
1669 uint8_t const offOpcode = pIemCpu->offOpcode;
1670 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1671 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1672
1673 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1674 pIemCpu->abOpcode[offOpcode + 1],
1675 pIemCpu->abOpcode[offOpcode + 2],
1676 pIemCpu->abOpcode[offOpcode + 3]);
1677 pIemCpu->offOpcode = offOpcode + 4;
1678 return VINF_SUCCESS;
1679}
1680
1681
1682/**
1683 * Fetches the next opcode dword, returns automatically on failure.
1684 *
1685 * @param a_pu32 Where to return the opcode dword.
1686 * @remark Implicitly references pIemCpu.
1687 */
1688#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1689 do \
1690 { \
1691 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1692 if (rcStrict2 != VINF_SUCCESS) \
1693 return rcStrict2; \
1694 } while (0)
1695
1696
1697/**
1698 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1699 *
1700 * @returns Strict VBox status code.
1701 * @param pIemCpu The IEM state.
1702 * @param pu64 Where to return the opcode quad word.
1703 */
1704DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1705{
1706 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1707 if (rcStrict == VINF_SUCCESS)
1708 {
1709 uint8_t offOpcode = pIemCpu->offOpcode;
1710 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1711 pIemCpu->abOpcode[offOpcode + 1],
1712 pIemCpu->abOpcode[offOpcode + 2],
1713 pIemCpu->abOpcode[offOpcode + 3]);
1714 pIemCpu->offOpcode = offOpcode + 4;
1715 }
1716 else
1717 *pu64 = 0;
1718 return rcStrict;
1719}
1720
1721
1722/**
1723 * Fetches the next opcode dword, zero extending it to a quad word.
1724 *
1725 * @returns Strict VBox status code.
1726 * @param pIemCpu The IEM state.
1727 * @param pu64 Where to return the opcode quad word.
1728 */
1729DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1730{
1731 uint8_t const offOpcode = pIemCpu->offOpcode;
1732 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1733 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1734
1735 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1736 pIemCpu->abOpcode[offOpcode + 1],
1737 pIemCpu->abOpcode[offOpcode + 2],
1738 pIemCpu->abOpcode[offOpcode + 3]);
1739 pIemCpu->offOpcode = offOpcode + 4;
1740 return VINF_SUCCESS;
1741}
1742
1743
1744/**
1745 * Fetches the next opcode dword and zero extends it to a quad word, returns
1746 * automatically on failure.
1747 *
1748 * @param a_pu64 Where to return the opcode quad word.
1749 * @remark Implicitly references pIemCpu.
1750 */
1751#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1752 do \
1753 { \
1754 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1755 if (rcStrict2 != VINF_SUCCESS) \
1756 return rcStrict2; \
1757 } while (0)
1758
1759
1760/**
1761 * Fetches the next signed double word from the opcode stream.
1762 *
1763 * @returns Strict VBox status code.
1764 * @param pIemCpu The IEM state.
1765 * @param pi32 Where to return the signed double word.
1766 */
1767DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1768{
1769 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1770}
1771
1772/**
1773 * Fetches the next signed double word from the opcode stream, returning
1774 * automatically on failure.
1775 *
1776 * @param a_pi32 Where to return the signed double word.
1777 * @remark Implicitly references pIemCpu.
1778 */
1779#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1780 do \
1781 { \
1782 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1783 if (rcStrict2 != VINF_SUCCESS) \
1784 return rcStrict2; \
1785 } while (0)
1786
1787
1788/**
1789 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1790 *
1791 * @returns Strict VBox status code.
1792 * @param pIemCpu The IEM state.
1793 * @param pu64 Where to return the opcode qword.
1794 */
1795DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1796{
1797 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1798 if (rcStrict == VINF_SUCCESS)
1799 {
1800 uint8_t offOpcode = pIemCpu->offOpcode;
1801 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1802 pIemCpu->abOpcode[offOpcode + 1],
1803 pIemCpu->abOpcode[offOpcode + 2],
1804 pIemCpu->abOpcode[offOpcode + 3]);
1805 pIemCpu->offOpcode = offOpcode + 4;
1806 }
1807 else
1808 *pu64 = 0;
1809 return rcStrict;
1810}
1811
1812
1813/**
1814 * Fetches the next opcode dword, sign extending it into a quad word.
1815 *
1816 * @returns Strict VBox status code.
1817 * @param pIemCpu The IEM state.
1818 * @param pu64 Where to return the opcode quad word.
1819 */
1820DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1821{
1822 uint8_t const offOpcode = pIemCpu->offOpcode;
1823 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1824 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1825
1826 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1827 pIemCpu->abOpcode[offOpcode + 1],
1828 pIemCpu->abOpcode[offOpcode + 2],
1829 pIemCpu->abOpcode[offOpcode + 3]);
1830 *pu64 = i32;
1831 pIemCpu->offOpcode = offOpcode + 4;
1832 return VINF_SUCCESS;
1833}
1834
1835
1836/**
1837 * Fetches the next opcode double word and sign extends it to a quad word,
1838 * returns automatically on failure.
1839 *
1840 * @param a_pu64 Where to return the opcode quad word.
1841 * @remark Implicitly references pIemCpu.
1842 */
1843#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1844 do \
1845 { \
1846 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1847 if (rcStrict2 != VINF_SUCCESS) \
1848 return rcStrict2; \
1849 } while (0)
1850
1851
1852/**
1853 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1854 *
1855 * @returns Strict VBox status code.
1856 * @param pIemCpu The IEM state.
1857 * @param pu64 Where to return the opcode qword.
1858 */
1859DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1860{
1861 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1862 if (rcStrict == VINF_SUCCESS)
1863 {
1864 uint8_t offOpcode = pIemCpu->offOpcode;
1865 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1866 pIemCpu->abOpcode[offOpcode + 1],
1867 pIemCpu->abOpcode[offOpcode + 2],
1868 pIemCpu->abOpcode[offOpcode + 3],
1869 pIemCpu->abOpcode[offOpcode + 4],
1870 pIemCpu->abOpcode[offOpcode + 5],
1871 pIemCpu->abOpcode[offOpcode + 6],
1872 pIemCpu->abOpcode[offOpcode + 7]);
1873 pIemCpu->offOpcode = offOpcode + 8;
1874 }
1875 else
1876 *pu64 = 0;
1877 return rcStrict;
1878}
1879
1880
1881/**
1882 * Fetches the next opcode qword.
1883 *
1884 * @returns Strict VBox status code.
1885 * @param pIemCpu The IEM state.
1886 * @param pu64 Where to return the opcode qword.
1887 */
1888DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1889{
1890 uint8_t const offOpcode = pIemCpu->offOpcode;
1891 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1892 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1893
1894 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1895 pIemCpu->abOpcode[offOpcode + 1],
1896 pIemCpu->abOpcode[offOpcode + 2],
1897 pIemCpu->abOpcode[offOpcode + 3],
1898 pIemCpu->abOpcode[offOpcode + 4],
1899 pIemCpu->abOpcode[offOpcode + 5],
1900 pIemCpu->abOpcode[offOpcode + 6],
1901 pIemCpu->abOpcode[offOpcode + 7]);
1902 pIemCpu->offOpcode = offOpcode + 8;
1903 return VINF_SUCCESS;
1904}
1905
1906
1907/**
1908 * Fetches the next opcode quad word, returns automatically on failure.
1909 *
1910 * @param a_pu64 Where to return the opcode quad word.
1911 * @remark Implicitly references pIemCpu.
1912 */
1913#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1914 do \
1915 { \
1916 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1917 if (rcStrict2 != VINF_SUCCESS) \
1918 return rcStrict2; \
1919 } while (0)
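/*
 * Note on the fetchers above: the RT_MAKE_U16 / RT_MAKE_U32_FROM_U8 /
 * RT_MAKE_U64_FROM_U8 invocations take the byte at abOpcode[offOpcode] as the
 * least significant byte, so immediates are assembled in x86 little-endian
 * order independently of the host byte order.  E.g. the opcode byte sequence
 * 78 56 34 12 yields the dword 0x12345678.
 */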
1920
1921
1922/** @name Misc Worker Functions.
1923 * @{
1924 */
1925
1926
1927/**
1928 * Validates a new SS segment.
1929 *
1930 * @returns VBox strict status code.
1931 * @param pIemCpu The IEM per CPU instance data.
1932 * @param pCtx The CPU context.
1933 * @param NewSS The new SS selector.
1934 * @param uCpl The CPL to load the stack for.
1935 * @param pDesc Where to return the descriptor.
1936 */
1937static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1938{
1939 NOREF(pCtx);
1940
1941 /* Null selectors are not allowed (we're not called for dispatching
1942 interrupts with SS=0 in long mode). */
1943 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1944 {
1945 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1946 return iemRaiseTaskSwitchFault0(pIemCpu);
1947 }
1948
1949 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1950 if ((NewSS & X86_SEL_RPL) != uCpl)
1951 {
1952 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1953 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1954 }
1955
1956 /*
1957 * Read the descriptor.
1958 */
1959 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1960 if (rcStrict != VINF_SUCCESS)
1961 return rcStrict;
1962
1963 /*
1964 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1965 */
1966 if (!pDesc->Legacy.Gen.u1DescType)
1967 {
1968 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1969 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1970 }
1971
1972 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1973 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1974 {
1975 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1976 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1977 }
1978 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1979 {
1980 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1981 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1982 }
1983
1984 /* Is it there? */
1985 /** @todo testcase: Is this checked before the canonical / limit check below? */
1986 if (!pDesc->Legacy.Gen.u1Present)
1987 {
1988 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1989 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1990 }
1991
1992 return VINF_SUCCESS;
1993}
1994
1995
1996/**
1997 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1998 * not.
1999 *
2000 * @param a_pIemCpu The IEM per CPU data.
2001 * @param a_pCtx The CPU context.
2002 */
2003#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2004# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2005 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2006 ? (a_pCtx)->eflags.u \
2007 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2008#else
2009# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2010 ( (a_pCtx)->eflags.u )
2011#endif
2012
2013/**
2014 * Updates the EFLAGS in the correct manner wrt. PATM.
2015 *
2016 * @param a_pIemCpu The IEM per CPU data.
2017 * @param a_pCtx The CPU context.
2018 */
2019#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2020# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2021 do { \
2022 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2023 (a_pCtx)->eflags.u = (a_fEfl); \
2024 else \
2025 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2026 } while (0)
2027#else
2028# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2029 do { \
2030 (a_pCtx)->eflags.u = (a_fEfl); \
2031 } while (0)
2032#endif
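/*
 * Illustrative only: the two macros are intended to be used as a read-modify-write
 * pair so that PATM-managed flag bits stay consistent in raw-mode, e.g. the way the
 * real-mode exception dispatcher further down masks the interrupt flag:
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */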
2033
2034
2035/** @} */
2036
2037/** @name Raising Exceptions.
2038 *
2039 * @{
2040 */
2041
2042/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2043 * @{ */
2044/** CPU exception. */
2045#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2046/** External interrupt (from PIC, APIC, whatever). */
2047#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2048/** Software interrupt (INT n or INTO, not BOUND).
2049 * Returns to the following instruction. */
2050#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2051/** Takes an error code. */
2052#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2053/** Takes a CR2. */
2054#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2055/** Generated by the breakpoint instruction. */
2056#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2057/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2058#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2059/** @} */
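/*
 * Illustrative only: callers of iemRaiseXcptOrInt combine these flags to describe
 * the event being dispatched.  A page fault, for instance, is a CPU exception that
 * carries both an error code and a CR2 value, so it would be raised with a flag
 * combination along the lines of
 *
 *      IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
 *
 * while an external interrupt delivered by the PIC/APIC would just use
 * IEM_XCPT_FLAGS_T_EXT_INT.
 */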
2060
2061
2062/**
2063 * Loads the specified stack far pointer from the TSS.
2064 *
2065 * @returns VBox strict status code.
2066 * @param pIemCpu The IEM per CPU instance data.
2067 * @param pCtx The CPU context.
2068 * @param uCpl The CPL to load the stack for.
2069 * @param pSelSS Where to return the new stack segment.
2070 * @param puEsp Where to return the new stack pointer.
2071 */
2072static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2073 PRTSEL pSelSS, uint32_t *puEsp)
2074{
2075 VBOXSTRICTRC rcStrict;
2076 Assert(uCpl < 4);
2077 *puEsp = 0; /* make gcc happy */
2078 *pSelSS = 0; /* make gcc happy */
2079
2080 switch (pCtx->tr.Attr.n.u4Type)
2081 {
2082 /*
2083 * 16-bit TSS (X86TSS16).
2084 */
2085 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2086 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2087 {
2088 uint32_t off = uCpl * 4 + 2;
2089 if (off + 4 > pCtx->tr.u32Limit)
2090 {
2091 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2092 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2093 }
2094
2095 uint32_t u32Tmp = 0; /* gcc maybe... */
2096 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2097 if (rcStrict == VINF_SUCCESS)
2098 {
2099 *puEsp = RT_LOWORD(u32Tmp);
2100 *pSelSS = RT_HIWORD(u32Tmp);
2101 return VINF_SUCCESS;
2102 }
2103 break;
2104 }
2105
2106 /*
2107 * 32-bit TSS (X86TSS32).
2108 */
2109 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2110 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2111 {
2112 uint32_t off = uCpl * 8 + 4;
2113 if (off + 7 > pCtx->tr.u32Limit)
2114 {
2115 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2116 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2117 }
2118
2119 uint64_t u64Tmp;
2120 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2121 if (rcStrict == VINF_SUCCESS)
2122 {
2123 *puEsp = u64Tmp & UINT32_MAX;
2124 *pSelSS = (RTSEL)(u64Tmp >> 32);
2125 return VINF_SUCCESS;
2126 }
2127 break;
2128 }
2129
2130 default:
2131 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2132 }
2133 return rcStrict;
2134}
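/*
 * Note on the offset arithmetic above: in a 16-bit TSS the SP0:SS0, SP1:SS1 and
 * SP2:SS2 pairs start at offset 2 and are 4 bytes apart, hence off = uCpl * 4 + 2
 * (CPL 0 -> 2, CPL 1 -> 6, CPL 2 -> 10).  In a 32-bit TSS the ESPn/SSn pairs start
 * at offset 4 and are 8 bytes apart, hence off = uCpl * 8 + 4 (CPL 0 -> 4,
 * CPL 1 -> 12, CPL 2 -> 20).
 */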
2135
2136
2137/**
2138 * Loads the specified stack pointer from the 64-bit TSS.
2139 *
2140 * @returns VBox strict status code.
2141 * @param pIemCpu The IEM per CPU instance data.
2142 * @param pCtx The CPU context.
2143 * @param uCpl The CPL to load the stack for.
2144 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2145 * @param puRsp Where to return the new stack pointer.
2146 */
2147static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
2148 uint64_t *puRsp)
2149{
2150 Assert(uCpl < 4);
2151 Assert(uIst < 8);
2152 *puRsp = 0; /* make gcc happy */
2153
2154 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
2155
2156 uint32_t off;
2157 if (uIst)
2158 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2159 else
2160 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2161 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2162 {
2163 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2164 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2165 }
2166
2167 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2168}
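/*
 * Note: in the 64-bit TSS the RSP0..RSP2 fields start at offset 4 and the
 * IST1..IST7 fields at offset 0x24, each 8 bytes wide, which is what the
 * RT_OFFSETOF() based computation above expresses.  E.g. uCpl=1 reads from
 * offset 0x0c and uIst=3 from offset 0x34.
 */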
2169
2170
2171/**
2172 * Adjust the CPU state according to the exception being raised.
2173 *
2174 * @param pCtx The CPU context.
2175 * @param u8Vector The exception that has been raised.
2176 */
2177DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2178{
2179 switch (u8Vector)
2180 {
2181 case X86_XCPT_DB:
2182 pCtx->dr[7] &= ~X86_DR7_GD;
2183 break;
2184 /** @todo Read the AMD and Intel exception reference... */
2185 }
2186}
2187
2188
2189/**
2190 * Implements exceptions and interrupts for real mode.
2191 *
2192 * @returns VBox strict status code.
2193 * @param pIemCpu The IEM per CPU instance data.
2194 * @param pCtx The CPU context.
2195 * @param cbInstr The number of bytes to offset rIP by in the return
2196 * address.
2197 * @param u8Vector The interrupt / exception vector number.
2198 * @param fFlags The flags.
2199 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2200 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2201 */
2202static VBOXSTRICTRC
2203iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2204 PCPUMCTX pCtx,
2205 uint8_t cbInstr,
2206 uint8_t u8Vector,
2207 uint32_t fFlags,
2208 uint16_t uErr,
2209 uint64_t uCr2)
2210{
2211 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2212 NOREF(uErr); NOREF(uCr2);
2213
2214 /*
2215 * Read the IDT entry.
2216 */
2217 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2218 {
2219 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2220 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2221 }
2222 RTFAR16 Idte;
2223 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2224 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2225 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2226 return rcStrict;
2227
2228 /*
2229 * Push the stack frame.
2230 */
2231 uint16_t *pu16Frame;
2232 uint64_t uNewRsp;
2233 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2234 if (rcStrict != VINF_SUCCESS)
2235 return rcStrict;
2236
2237 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2238 pu16Frame[2] = (uint16_t)fEfl;
2239 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2240 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2241 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2242 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2243 return rcStrict;
2244
2245 /*
2246 * Load the vector address into cs:ip and make exception specific state
2247 * adjustments.
2248 */
2249 pCtx->cs.Sel = Idte.sel;
2250 pCtx->cs.ValidSel = Idte.sel;
2251 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2252 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2253 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2254 pCtx->rip = Idte.off;
2255 fEfl &= ~X86_EFL_IF;
2256 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2257
2258 /** @todo do we actually do this in real mode? */
2259 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2260 iemRaiseXcptAdjustState(pCtx, u8Vector);
2261
2262 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2263}
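/*
 * Note: in real mode the IDT is the classic interrupt vector table of 4-byte
 * offset:segment entries located at IDTR.base + vector * 4, which is why the
 * code above checks IDTR.limit against 4 * u8Vector + 3 and fetches a single
 * dword.  E.g. the entry for vector 0x08 occupies bytes 0x20..0x23 of the table.
 */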
2264
2265
2266/**
2267 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2268 *
2269 * @param pIemCpu The IEM per CPU instance data.
2270 * @param pSReg Pointer to the segment register.
2271 */
2272static void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2273{
2274 pSReg->Sel = 0;
2275 pSReg->ValidSel = 0;
2276 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2277 {
2278 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2279 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2280 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2281 }
2282 else
2283 {
2284 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2285 /** @todo check this on AMD-V */
2286 pSReg->u64Base = 0;
2287 pSReg->u32Limit = 0;
2288 }
2289}
2290
2291
2292/**
2293 * Loads a segment selector during a task switch in V8086 mode.
2294 *
2295 * @param pIemCpu The IEM per CPU instance data.
2296 * @param pSReg Pointer to the segment register.
2297 * @param uSel The selector value to load.
2298 */
2299static void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2300{
2301 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2302 pSReg->Sel = uSel;
2303 pSReg->ValidSel = uSel;
2304 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2305 pSReg->u64Base = uSel << 4;
2306 pSReg->u32Limit = 0xffff;
2307 pSReg->Attr.u = 0xf3;
2308}
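/*
 * Note: the attribute value 0xf3 used above decodes to present (P=1), DPL=3,
 * non-system (S=1) and type 3, i.e. an accessed read/write data segment, matching
 * what the Intel spec section cited above documents for segment registers in
 * virtual-8086 mode.
 */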
2309
2310
2311/**
2312 * Loads a NULL data selector into a selector register, both the hidden and
2313 * visible parts, in protected mode.
2314 *
2315 * @param pIemCpu The IEM state of the calling EMT.
2316 * @param pSReg Pointer to the segment register.
2317 * @param uRpl The RPL.
2318 */
2319static void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2320{
2321 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2322 * data selector in protected mode. */
2323 pSReg->Sel = uRpl;
2324 pSReg->ValidSel = uRpl;
2325 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2326 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2327 {
2328 /* VT-x (Intel 3960x) observed doing something like this. */
2329 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2330 pSReg->u32Limit = UINT32_MAX;
2331 pSReg->u64Base = 0;
2332 }
2333 else
2334 {
2335 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2336 pSReg->u32Limit = 0;
2337 pSReg->u64Base = 0;
2338 }
2339}
2340
2341
2342/**
2343 * Loads a segment selector during a task switch in protected mode. In this task
2344 * switch scenario, we would throw #TS exceptions rather than #GPs.
2345 *
2346 * @returns VBox strict status code.
2347 * @param pIemCpu The IEM per CPU instance data.
2348 * @param pSReg Pointer to the segment register.
2349 * @param uSel The new selector value.
2350 *
2351 * @remarks This does -NOT- handle CS or SS.
2352 * @remarks This expects pIemCpu->uCpl to be up to date.
2353 */
2354static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2355{
2356 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2357
2358 /* Null data selector. */
2359 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2360 {
2361 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2363 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2364 return VINF_SUCCESS;
2365 }
2366
2367 /* Fetch the descriptor. */
2368 IEMSELDESC Desc;
2369 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2370 if (rcStrict != VINF_SUCCESS)
2371 {
2372 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2373 VBOXSTRICTRC_VAL(rcStrict)));
2374 return rcStrict;
2375 }
2376
2377 /* Must be a data segment or readable code segment. */
2378 if ( !Desc.Legacy.Gen.u1DescType
2379 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2380 {
2381 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2382 Desc.Legacy.Gen.u4Type));
2383 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2384 }
2385
2386 /* Check privileges for data segments and non-conforming code segments. */
2387 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2388 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2389 {
2390 /* The RPL and the new CPL must be less than or equal to the DPL. */
2391 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2392 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2393 {
2394 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2395 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2396 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2397 }
2398 }
2399
2400 /* Is it there? */
2401 if (!Desc.Legacy.Gen.u1Present)
2402 {
2403 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2404 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2405 }
2406
2407 /* The base and limit. */
2408 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2409 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2410
2411 /*
2412 * Ok, everything checked out fine. Now set the accessed bit before
2413 * committing the result into the registers.
2414 */
2415 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2416 {
2417 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2418 if (rcStrict != VINF_SUCCESS)
2419 return rcStrict;
2420 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2421 }
2422
2423 /* Commit */
2424 pSReg->Sel = uSel;
2425 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2426 pSReg->u32Limit = cbLimit;
2427 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2428 pSReg->ValidSel = uSel;
2429 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2430 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2431 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2432
2433 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2434 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2435 return VINF_SUCCESS;
2436}
2437
2438
2439/**
2440 * Performs a task switch.
2441 *
2442 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2443 * caller is responsible for performing the necessary checks (like DPL, TSS
2444 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2445 * reference for JMP, CALL, IRET.
2446 *
2447 * If the task switch is due to a software interrupt or hardware exception,
2448 * the caller is responsible for validating the TSS selector and descriptor. See
2449 * Intel Instruction reference for INT n.
2450 *
2451 * @returns VBox strict status code.
2452 * @param pIemCpu The IEM per CPU instance data.
2453 * @param pCtx The CPU context.
2454 * @param enmTaskSwitch What caused this task switch.
2455 * @param uNextEip The EIP effective after the task switch.
2456 * @param fFlags The flags.
2457 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2458 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2459 * @param SelTSS The TSS selector of the new task.
2460 * @param pNewDescTSS Pointer to the new TSS descriptor.
2461 */
2462static VBOXSTRICTRC iemTaskSwitch(PIEMCPU pIemCpu,
2463 PCPUMCTX pCtx,
2464 IEMTASKSWITCH enmTaskSwitch,
2465 uint32_t uNextEip,
2466 uint32_t fFlags,
2467 uint16_t uErr,
2468 uint64_t uCr2,
2469 RTSEL SelTSS,
2470 PIEMSELDESC pNewDescTSS)
2471{
2472 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2473 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2474
2475 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2476 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2477 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2478 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2479 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2480
2481 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2482 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2483
2484 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2485 fIsNewTSS386, pCtx->eip, uNextEip));
2486
2487 /* Update CR2 in case it's a page-fault. */
2488 /** @todo This should probably be done much earlier in IEM/PGM. See
2489 * @bugref{5653} comment #49. */
2490 if (fFlags & IEM_XCPT_FLAGS_CR2)
2491 pCtx->cr2 = uCr2;
2492
2493 /*
2494 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2495 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2496 */
2497 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2498 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2499 if (uNewTSSLimit < uNewTSSLimitMin)
2500 {
2501 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2502 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2503 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2504 }
2505
2506 /*
2507 * Check the current TSS limit. The last written byte to the current TSS during the
2508 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2509 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2510 *
2511 * The AMD docs don't mention any limit checks for LTR, which suggests you can
2512 * end up with smaller than "legal" TSS limits.
2513 */
2514 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2515 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2516 if (uCurTSSLimit < uCurTSSLimitMin)
2517 {
2518 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2519 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2520 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2521 }
2522
2523 /*
2524 * Verify that the new TSS can be accessed and map it. Map only the required contents
2525 * and not the entire TSS.
2526 */
2527 void *pvNewTSS;
2528 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2529 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2530 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2531 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2532 * not perform correct translation if this happens. See Intel spec. 7.2.1
2533 * "Task-State Segment" */
2534 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2535 if (rcStrict != VINF_SUCCESS)
2536 {
2537 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2538 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2539 return rcStrict;
2540 }
2541
2542 /*
2543 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2544 */
2545 uint32_t u32EFlags = pCtx->eflags.u32;
2546 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2547 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2548 {
2549 PX86DESC pDescCurTSS;
2550 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2551 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2552 if (rcStrict != VINF_SUCCESS)
2553 {
2554 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2555 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2556 return rcStrict;
2557 }
2558
2559 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2560 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2561 if (rcStrict != VINF_SUCCESS)
2562 {
2563 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2564 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2565 return rcStrict;
2566 }
2567
2568 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2569 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2570 {
2571 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2572 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2573 u32EFlags &= ~X86_EFL_NT;
2574 }
2575 }
2576
2577 /*
2578 * Save the CPU state into the current TSS.
2579 */
2580 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2581 if (GCPtrNewTSS == GCPtrCurTSS)
2582 {
2583 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2584 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2585 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2586 }
2587 if (fIsNewTSS386)
2588 {
2589 /*
2590 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2591 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2592 */
2593 void *pvCurTSS32;
2594 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2595 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2596 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2597 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2598 if (rcStrict != VINF_SUCCESS)
2599 {
2600 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2601 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2602 return rcStrict;
2603 }
2604
2605 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
2606 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2607 pCurTSS32->eip = uNextEip;
2608 pCurTSS32->eflags = u32EFlags;
2609 pCurTSS32->eax = pCtx->eax;
2610 pCurTSS32->ecx = pCtx->ecx;
2611 pCurTSS32->edx = pCtx->edx;
2612 pCurTSS32->ebx = pCtx->ebx;
2613 pCurTSS32->esp = pCtx->esp;
2614 pCurTSS32->ebp = pCtx->ebp;
2615 pCurTSS32->esi = pCtx->esi;
2616 pCurTSS32->edi = pCtx->edi;
2617 pCurTSS32->es = pCtx->es.Sel;
2618 pCurTSS32->cs = pCtx->cs.Sel;
2619 pCurTSS32->ss = pCtx->ss.Sel;
2620 pCurTSS32->ds = pCtx->ds.Sel;
2621 pCurTSS32->fs = pCtx->fs.Sel;
2622 pCurTSS32->gs = pCtx->gs.Sel;
2623
2624 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2625 if (rcStrict != VINF_SUCCESS)
2626 {
2627 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2628 VBOXSTRICTRC_VAL(rcStrict)));
2629 return rcStrict;
2630 }
2631 }
2632 else
2633 {
2634 /*
2635 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2636 */
2637 void *pvCurTSS16;
2638 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2639 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2640 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2641 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2642 if (rcStrict != VINF_SUCCESS)
2643 {
2644 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2645 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2646 return rcStrict;
2647 }
2648
2649 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
2650 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2651 pCurTSS16->ip = uNextEip;
2652 pCurTSS16->flags = u32EFlags;
2653 pCurTSS16->ax = pCtx->ax;
2654 pCurTSS16->cx = pCtx->cx;
2655 pCurTSS16->dx = pCtx->dx;
2656 pCurTSS16->bx = pCtx->bx;
2657 pCurTSS16->sp = pCtx->sp;
2658 pCurTSS16->bp = pCtx->bp;
2659 pCurTSS16->si = pCtx->si;
2660 pCurTSS16->di = pCtx->di;
2661 pCurTSS16->es = pCtx->es.Sel;
2662 pCurTSS16->cs = pCtx->cs.Sel;
2663 pCurTSS16->ss = pCtx->ss.Sel;
2664 pCurTSS16->ds = pCtx->ds.Sel;
2665
2666 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2667 if (rcStrict != VINF_SUCCESS)
2668 {
2669 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2670 VBOXSTRICTRC_VAL(rcStrict)));
2671 return rcStrict;
2672 }
2673 }
2674
2675 /*
2676 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2677 */
2678 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2679 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2680 {
2681 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2682 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2683 pNewTSS->selPrev = pCtx->tr.Sel;
2684 }
2685
2686 /*
2687 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2688 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2689 */
2690 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2691 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2692 bool fNewDebugTrap;
2693 if (fIsNewTSS386)
2694 {
2695 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2696 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2697 uNewEip = pNewTSS32->eip;
2698 uNewEflags = pNewTSS32->eflags;
2699 uNewEax = pNewTSS32->eax;
2700 uNewEcx = pNewTSS32->ecx;
2701 uNewEdx = pNewTSS32->edx;
2702 uNewEbx = pNewTSS32->ebx;
2703 uNewEsp = pNewTSS32->esp;
2704 uNewEbp = pNewTSS32->ebp;
2705 uNewEsi = pNewTSS32->esi;
2706 uNewEdi = pNewTSS32->edi;
2707 uNewES = pNewTSS32->es;
2708 uNewCS = pNewTSS32->cs;
2709 uNewSS = pNewTSS32->ss;
2710 uNewDS = pNewTSS32->ds;
2711 uNewFS = pNewTSS32->fs;
2712 uNewGS = pNewTSS32->gs;
2713 uNewLdt = pNewTSS32->selLdt;
2714 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2715 }
2716 else
2717 {
2718 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2719 uNewCr3 = 0;
2720 uNewEip = pNewTSS16->ip;
2721 uNewEflags = pNewTSS16->flags;
2722 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2723 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2724 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2725 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2726 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2727 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2728 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2729 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2730 uNewES = pNewTSS16->es;
2731 uNewCS = pNewTSS16->cs;
2732 uNewSS = pNewTSS16->ss;
2733 uNewDS = pNewTSS16->ds;
2734 uNewFS = 0;
2735 uNewGS = 0;
2736 uNewLdt = pNewTSS16->selLdt;
2737 fNewDebugTrap = false;
2738 }
2739
2740 if (GCPtrNewTSS == GCPtrCurTSS)
2741 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2742 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2743
2744 /*
2745 * We're done accessing the new TSS.
2746 */
2747 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2748 if (rcStrict != VINF_SUCCESS)
2749 {
2750 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2751 return rcStrict;
2752 }
2753
2754 /*
2755 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2756 */
2757 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2758 {
2759 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2760 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2761 if (rcStrict != VINF_SUCCESS)
2762 {
2763 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2764 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2765 return rcStrict;
2766 }
2767
2768 /* Check that the descriptor indicates the new TSS is available (not busy). */
2769 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2770 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2771 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2772
2773 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2774 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2775 if (rcStrict != VINF_SUCCESS)
2776 {
2777 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2778 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2779 return rcStrict;
2780 }
2781 }
2782
2783 /*
2784 * From this point on, we're technically in the new task. Exceptions raised from here on are
2785 * deferred until the task switch completes and are delivered before the first instruction of the new task executes.
2786 */
2787 pCtx->tr.Sel = SelTSS;
2788 pCtx->tr.ValidSel = SelTSS;
2789 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2790 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2791 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2792 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2793 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2794
2795 /* Set the busy bit in TR. */
2796 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2797 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2798 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2799 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2800 {
2801 uNewEflags |= X86_EFL_NT;
2802 }
2803
2804 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2805 pCtx->cr0 |= X86_CR0_TS;
2806 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2807
2808 pCtx->eip = uNewEip;
2809 pCtx->eax = uNewEax;
2810 pCtx->ecx = uNewEcx;
2811 pCtx->edx = uNewEdx;
2812 pCtx->ebx = uNewEbx;
2813 pCtx->esp = uNewEsp;
2814 pCtx->ebp = uNewEbp;
2815 pCtx->esi = uNewEsi;
2816 pCtx->edi = uNewEdi;
2817
2818 uNewEflags &= X86_EFL_LIVE_MASK;
2819 uNewEflags |= X86_EFL_RA1_MASK;
2820 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2821
2822 /*
2823 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2824 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2825 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2826 */
2827 pCtx->es.Sel = uNewES;
2828 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2829 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2830
2831 pCtx->cs.Sel = uNewCS;
2832 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2833 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2834
2835 pCtx->ss.Sel = uNewSS;
2836 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2837 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2838
2839 pCtx->ds.Sel = uNewDS;
2840 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2841 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2842
2843 pCtx->fs.Sel = uNewFS;
2844 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2845 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2846
2847 pCtx->gs.Sel = uNewGS;
2848 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2849 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2850 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2851
2852 pCtx->ldtr.Sel = uNewLdt;
2853 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2854 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2855 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2856
2857 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2858 {
2859 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2860 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2861 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2862 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2863 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2864 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2865 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2866 }
2867
2868 /*
2869 * Switch CR3 for the new task.
2870 */
2871 if ( fIsNewTSS386
2872 && (pCtx->cr0 & X86_CR0_PG))
2873 {
2874 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2875 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2876 {
2877 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2878 AssertRCSuccessReturn(rc, rc);
2879 }
2880 else
2881 pCtx->cr3 = uNewCr3;
2882
2883 /* Inform PGM. */
2884 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2885 {
2886 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2887 AssertRCReturn(rc, rc);
2888 /* ignore informational status codes */
2889 }
2890 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2891 }
2892
2893 /*
2894 * Switch LDTR for the new task.
2895 */
2896 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2897 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2898 else
2899 {
2900 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2901
2902 IEMSELDESC DescNewLdt;
2903 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2904 if (rcStrict != VINF_SUCCESS)
2905 {
2906 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2907 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2908 return rcStrict;
2909 }
2910 if ( !DescNewLdt.Legacy.Gen.u1Present
2911 || DescNewLdt.Legacy.Gen.u1DescType
2912 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2913 {
2914 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2915 uNewLdt, DescNewLdt.Legacy.u));
2916 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2917 }
2918
2919 pCtx->ldtr.ValidSel = uNewLdt;
2920 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2921 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2922 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2923 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2924 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2925 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2926 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2927 }
2928
2929 IEMSELDESC DescSS;
2930 if (IEM_IS_V86_MODE(pIemCpu))
2931 {
2932 pIemCpu->uCpl = 3;
2933 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2934 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2935 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2936 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2937 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2938 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2939 }
2940 else
2941 {
2942 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2943
2944 /*
2945 * Load the stack segment for the new task.
2946 */
2947 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2948 {
2949 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2950 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2951 }
2952
2953 /* Fetch the descriptor. */
2954 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2955 if (rcStrict != VINF_SUCCESS)
2956 {
2957 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2958 VBOXSTRICTRC_VAL(rcStrict)));
2959 return rcStrict;
2960 }
2961
2962 /* SS must be a data segment and writable. */
2963 if ( !DescSS.Legacy.Gen.u1DescType
2964 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2965 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2966 {
2967 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2968 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2969 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2970 }
2971
2972 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2973 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2974 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2975 {
2976 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2977 uNewCpl));
2978 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2979 }
2980
2981 /* Is it there? */
2982 if (!DescSS.Legacy.Gen.u1Present)
2983 {
2984 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2985 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2986 }
2987
2988 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2989 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2990
2991 /* Set the accessed bit before committing the result into SS. */
2992 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2993 {
2994 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2995 if (rcStrict != VINF_SUCCESS)
2996 return rcStrict;
2997 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2998 }
2999
3000 /* Commit SS. */
3001 pCtx->ss.Sel = uNewSS;
3002 pCtx->ss.ValidSel = uNewSS;
3003 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3004 pCtx->ss.u32Limit = cbLimit;
3005 pCtx->ss.u64Base = u64Base;
3006 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3007 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3008
3009 /* CPL has changed, update IEM before loading rest of segments. */
3010 pIemCpu->uCpl = uNewCpl;
3011
3012 /*
3013 * Load the data segments for the new task.
3014 */
3015 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3016 if (rcStrict != VINF_SUCCESS)
3017 return rcStrict;
3018 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3019 if (rcStrict != VINF_SUCCESS)
3020 return rcStrict;
3021 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3022 if (rcStrict != VINF_SUCCESS)
3023 return rcStrict;
3024 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3025 if (rcStrict != VINF_SUCCESS)
3026 return rcStrict;
3027
3028 /*
3029 * Load the code segment for the new task.
3030 */
3031 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3032 {
3033 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3034 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3035 }
3036
3037 /* Fetch the descriptor. */
3038 IEMSELDESC DescCS;
3039 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3040 if (rcStrict != VINF_SUCCESS)
3041 {
3042 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3043 return rcStrict;
3044 }
3045
3046 /* CS must be a code segment. */
3047 if ( !DescCS.Legacy.Gen.u1DescType
3048 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3049 {
3050 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3051 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3052 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3053 }
3054
3055 /* For conforming CS, DPL must be less than or equal to the RPL. */
3056 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3057 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3058 {
3059 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3060 DescCS.Legacy.Gen.u2Dpl));
3061 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3062 }
3063
3064 /* For non-conforming CS, DPL must match RPL. */
3065 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3066 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3067 {
3068 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3069 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3070 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3071 }
3072
3073 /* Is it there? */
3074 if (!DescCS.Legacy.Gen.u1Present)
3075 {
3076 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3077 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3078 }
3079
3080 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3081 u64Base = X86DESC_BASE(&DescCS.Legacy);
3082
3083 /* Set the accessed bit before committing the result into CS. */
3084 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3085 {
3086 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3087 if (rcStrict != VINF_SUCCESS)
3088 return rcStrict;
3089 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3090 }
3091
3092 /* Commit CS. */
3093 pCtx->cs.Sel = uNewCS;
3094 pCtx->cs.ValidSel = uNewCS;
3095 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3096 pCtx->cs.u32Limit = cbLimit;
3097 pCtx->cs.u64Base = u64Base;
3098 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3099 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3100 }
3101
3102 /** @todo Debug trap. */
3103 if (fIsNewTSS386 && fNewDebugTrap)
3104 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3105
3106 /*
3107 * Construct the error code masks based on what caused this task switch.
3108 * See Intel Instruction reference for INT.
3109 */
3110 uint16_t uExt;
3111 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3112 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3113 {
3114 uExt = 1;
3115 }
3116 else
3117 uExt = 0;
3118
3119 /*
3120 * Push any error code on to the new stack.
3121 */
3122 if (fFlags & IEM_XCPT_FLAGS_ERR)
3123 {
3124 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3125 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3126 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
3127 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
3128
3129 /* Check that there is sufficient space on the stack. */
3130 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3131 if ( pCtx->esp - 1 > cbLimitSS
3132 || pCtx->esp < cbStackFrame)
3133 {
3134 /** @todo Intel says #SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3135 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3136 cbStackFrame));
3137 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3138 }
3139
3140 if (fIsNewTSS386)
3141 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3142 else
3143 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3144 if (rcStrict != VINF_SUCCESS)
3145 {
3146 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3147 VBOXSTRICTRC_VAL(rcStrict)));
3148 return rcStrict;
3149 }
3150 }
3151
3152 /* Check the new EIP against the new CS limit. */
3153 if (pCtx->eip > pCtx->cs.u32Limit)
3154 {
3155 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3156 pCtx->eip, pCtx->cs.u32Limit));
3157 /** @todo Intel says #GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3158 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3159 }
3160
3161 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3162 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3163}
3164
3165
3166/**
3167 * Implements exceptions and interrupts for protected mode.
3168 *
3169 * @returns VBox strict status code.
3170 * @param pIemCpu The IEM per CPU instance data.
3171 * @param pCtx The CPU context.
3172 * @param cbInstr The number of bytes to offset rIP by in the return
3173 * address.
3174 * @param u8Vector The interrupt / exception vector number.
3175 * @param fFlags The flags.
3176 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3177 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3178 */
3179static VBOXSTRICTRC
3180iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3181 PCPUMCTX pCtx,
3182 uint8_t cbInstr,
3183 uint8_t u8Vector,
3184 uint32_t fFlags,
3185 uint16_t uErr,
3186 uint64_t uCr2)
3187{
3188 NOREF(cbInstr);
3189
3190 /*
3191 * Read the IDT entry.
3192 */
3193 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3194 {
3195 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3196 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3197 }
3198 X86DESC Idte;
3199 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3200 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3201 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3202 return rcStrict;
3203 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3204 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3205 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3206
3207 /*
3208 * Check the descriptor type, DPL and such.
3209 * ASSUMES this is done in the same order as described for call-gate calls.
3210 */
3211 if (Idte.Gate.u1DescType)
3212 {
3213 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3214 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3215 }
3216 bool fTaskGate = false;
3217 uint8_t f32BitGate = true;
3218 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3219 switch (Idte.Gate.u4Type)
3220 {
3221 case X86_SEL_TYPE_SYS_UNDEFINED:
3222 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3223 case X86_SEL_TYPE_SYS_LDT:
3224 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3225 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3226 case X86_SEL_TYPE_SYS_UNDEFINED2:
3227 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3228 case X86_SEL_TYPE_SYS_UNDEFINED3:
3229 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3230 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3231 case X86_SEL_TYPE_SYS_UNDEFINED4:
3232 {
3233 /** @todo check what actually happens when the type is wrong...
3234 * esp. call gates. */
3235 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3236 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3237 }
3238
3239 case X86_SEL_TYPE_SYS_286_INT_GATE:
3240 f32BitGate = false;
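 /* fall thru - a 286 interrupt gate is dispatched like its 386 counterpart,
    just with a 16-bit stack frame. */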
3241 case X86_SEL_TYPE_SYS_386_INT_GATE:
3242 fEflToClear |= X86_EFL_IF;
3243 break;
3244
3245 case X86_SEL_TYPE_SYS_TASK_GATE:
3246 fTaskGate = true;
3247#ifndef IEM_IMPLEMENTS_TASKSWITCH
3248 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3249#endif
3250 break;
3251
3252 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3253 f32BitGate = false;
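 /* fall thru - a 286 trap gate is dispatched like its 386 counterpart,
    just with a 16-bit stack frame. */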
3254 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3255 break;
3256
3257 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3258 }
3259
3260 /* Check DPL against CPL if applicable. */
3261 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3262 {
3263 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3264 {
3265 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3266 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3267 }
3268 }
3269
3270 /* Is it there? */
3271 if (!Idte.Gate.u1Present)
3272 {
3273 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3274 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3275 }
3276
3277 /* Is it a task-gate? */
3278 if (fTaskGate)
3279 {
3280 /*
3281 * Construct the error code masks based on what caused this task switch.
3282 * See Intel Instruction reference for INT.
3283 */
3284 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3285 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3286 RTSEL SelTSS = Idte.Gate.u16Sel;
3287
3288 /*
3289 * Fetch the TSS descriptor in the GDT.
3290 */
3291 IEMSELDESC DescTSS;
3292 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3293 if (rcStrict != VINF_SUCCESS)
3294 {
3295 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3296 VBOXSTRICTRC_VAL(rcStrict)));
3297 return rcStrict;
3298 }
3299
3300 /* The TSS descriptor must be a system segment and be available (not busy). */
3301 if ( DescTSS.Legacy.Gen.u1DescType
3302 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3303 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3304 {
3305 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3306 u8Vector, SelTSS, DescTSS.Legacy.au64));
3307 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3308 }
3309
3310 /* The TSS must be present. */
3311 if (!DescTSS.Legacy.Gen.u1Present)
3312 {
3313 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3314 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3315 }
3316
3317 /* Do the actual task switch. */
3318 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3319 }
3320
3321 /* A null CS is bad. */
3322 RTSEL NewCS = Idte.Gate.u16Sel;
3323 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3324 {
3325 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3326 return iemRaiseGeneralProtectionFault0(pIemCpu);
3327 }
3328
3329 /* Fetch the descriptor for the new CS. */
3330 IEMSELDESC DescCS;
3331 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3332 if (rcStrict != VINF_SUCCESS)
3333 {
3334 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3335 return rcStrict;
3336 }
3337
3338 /* Must be a code segment. */
3339 if (!DescCS.Legacy.Gen.u1DescType)
3340 {
3341 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3342 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3343 }
3344 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3345 {
3346 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3347 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3348 }
3349
3350 /* Don't allow lowering the privilege level. */
3351 /** @todo Does the lowering of privileges apply to software interrupts
3352 * only? This has a bearing on the more-privileged or
3353 * same-privilege stack behavior further down. A testcase would
3354 * be nice. */
3355 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3356 {
3357 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3358 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3359 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3360 }
3361
3362 /* Make sure the selector is present. */
3363 if (!DescCS.Legacy.Gen.u1Present)
3364 {
3365 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3366 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3367 }
3368
3369 /* Check the new EIP against the new CS limit. */
3370 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3371 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3372 ? Idte.Gate.u16OffsetLow
3373 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3374 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3375 if (uNewEip > cbLimitCS)
3376 {
3377 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3378 u8Vector, uNewEip, cbLimitCS, NewCS));
3379 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3380 }
3381
3382 /* Calc the flag image to push. */
3383 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3384 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3385 fEfl &= ~X86_EFL_RF;
3386 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3387 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3388
3389 /* From V8086 mode only go to CPL 0. */
3390 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3391 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3392 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3393 {
3394 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3395 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3396 }
3397
3398 /*
3399 * If the privilege level changes, we need to get a new stack from the TSS.
3400 * This in turn means validating the new SS and ESP...
3401 */
3402 if (uNewCpl != pIemCpu->uCpl)
3403 {
3404 RTSEL NewSS;
3405 uint32_t uNewEsp;
3406 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3407 if (rcStrict != VINF_SUCCESS)
3408 return rcStrict;
3409
3410 IEMSELDESC DescSS;
3411 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3412 if (rcStrict != VINF_SUCCESS)
3413 return rcStrict;
3414
3415 /* Check that there is sufficient space for the stack frame. */
3416 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3417 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
3418 {
3419 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
3420 }
3421
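 /* Frame layout, lowest address first: [error code,] EIP, CS, EFLAGS, old ESP,
    old SS, and for V8086 mode additionally ES, DS, FS and GS. Entries are
    words for 16-bit gates and doublewords for 32-bit gates, hence the shift
    by f32BitGate below. */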
3422 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3423 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3424 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3425 if ( uNewEsp - 1 > cbLimitSS
3426 || uNewEsp < cbStackFrame)
3427 {
3428 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3429 u8Vector, NewSS, uNewEsp, cbStackFrame));
3430 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3431 }
3432
3433 /*
3434 * Start making changes.
3435 */
3436
3437 /* Create the stack frame. */
3438 RTPTRUNION uStackFrame;
3439 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3440 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3441 if (rcStrict != VINF_SUCCESS)
3442 return rcStrict;
3443 void * const pvStackFrame = uStackFrame.pv;
3444 if (f32BitGate)
3445 {
3446 if (fFlags & IEM_XCPT_FLAGS_ERR)
3447 *uStackFrame.pu32++ = uErr;
3448 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3449 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3450 uStackFrame.pu32[2] = fEfl;
3451 uStackFrame.pu32[3] = pCtx->esp;
3452 uStackFrame.pu32[4] = pCtx->ss.Sel;
3453 if (fEfl & X86_EFL_VM)
3454 {
3455 uStackFrame.pu32[1] = pCtx->cs.Sel;
3456 uStackFrame.pu32[5] = pCtx->es.Sel;
3457 uStackFrame.pu32[6] = pCtx->ds.Sel;
3458 uStackFrame.pu32[7] = pCtx->fs.Sel;
3459 uStackFrame.pu32[8] = pCtx->gs.Sel;
3460 }
3461 }
3462 else
3463 {
3464 if (fFlags & IEM_XCPT_FLAGS_ERR)
3465 *uStackFrame.pu16++ = uErr;
3466 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3467 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3468 uStackFrame.pu16[2] = fEfl;
3469 uStackFrame.pu16[3] = pCtx->sp;
3470 uStackFrame.pu16[4] = pCtx->ss.Sel;
3471 if (fEfl & X86_EFL_VM)
3472 {
3473 uStackFrame.pu16[1] = pCtx->cs.Sel;
3474 uStackFrame.pu16[5] = pCtx->es.Sel;
3475 uStackFrame.pu16[6] = pCtx->ds.Sel;
3476 uStackFrame.pu16[7] = pCtx->fs.Sel;
3477 uStackFrame.pu16[8] = pCtx->gs.Sel;
3478 }
3479 }
3480 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3481 if (rcStrict != VINF_SUCCESS)
3482 return rcStrict;
3483
3484 /* Mark the selectors 'accessed' (hope this is the correct time). */
3485 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3486 * after pushing the stack frame? (Write protect the gdt + stack to
3487 * find out.) */
3488 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3489 {
3490 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3491 if (rcStrict != VINF_SUCCESS)
3492 return rcStrict;
3493 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3494 }
3495
3496 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3497 {
3498 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3499 if (rcStrict != VINF_SUCCESS)
3500 return rcStrict;
3501 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3502 }
3503
3504 /*
3505 * Start committing the register changes (joins with the DPL=CPL branch).
3506 */
3507 pCtx->ss.Sel = NewSS;
3508 pCtx->ss.ValidSel = NewSS;
3509 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3510 pCtx->ss.u32Limit = cbLimitSS;
3511 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3512 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3513 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3514 pIemCpu->uCpl = uNewCpl;
3515
3516 if (fEfl & X86_EFL_VM)
3517 {
3518 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3519 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3520 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3521 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3522 }
3523 }
3524 /*
3525 * Same privilege, no stack change and smaller stack frame.
3526 */
3527 else
3528 {
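 /* Frame layout, lowest address first: [error code,] EIP, CS, EFLAGS; no ESP/SS
    since the stack is not switched. Entries are words for 16-bit gates and
    doublewords for 32-bit gates. */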
3529 uint64_t uNewRsp;
3530 RTPTRUNION uStackFrame;
3531 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3532 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3533 if (rcStrict != VINF_SUCCESS)
3534 return rcStrict;
3535 void * const pvStackFrame = uStackFrame.pv;
3536
3537 if (f32BitGate)
3538 {
3539 if (fFlags & IEM_XCPT_FLAGS_ERR)
3540 *uStackFrame.pu32++ = uErr;
3541 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3542 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3543 uStackFrame.pu32[2] = fEfl;
3544 }
3545 else
3546 {
3547 if (fFlags & IEM_XCPT_FLAGS_ERR)
3548 *uStackFrame.pu16++ = uErr;
3549 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3550 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3551 uStackFrame.pu16[2] = fEfl;
3552 }
3553 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3554 if (rcStrict != VINF_SUCCESS)
3555 return rcStrict;
3556
3557 /* Mark the CS selector as 'accessed'. */
3558 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3559 {
3560 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3561 if (rcStrict != VINF_SUCCESS)
3562 return rcStrict;
3563 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3564 }
3565
3566 /*
3567 * Start committing the register changes (joins with the other branch).
3568 */
3569 pCtx->rsp = uNewRsp;
3570 }
3571
3572 /* ... register committing continues. */
3573 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3574 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3575 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3576 pCtx->cs.u32Limit = cbLimitCS;
3577 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3578 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3579
3580 pCtx->rip = uNewEip;
3581 fEfl &= ~fEflToClear;
3582 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3583
3584 if (fFlags & IEM_XCPT_FLAGS_CR2)
3585 pCtx->cr2 = uCr2;
3586
3587 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3588 iemRaiseXcptAdjustState(pCtx, u8Vector);
3589
3590 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3591}
3592
3593
3594/**
3595 * Implements exceptions and interrupts for long mode.
3596 *
3597 * @returns VBox strict status code.
3598 * @param pIemCpu The IEM per CPU instance data.
3599 * @param pCtx The CPU context.
3600 * @param cbInstr The number of bytes to offset rIP by in the return
3601 * address.
3602 * @param u8Vector The interrupt / exception vector number.
3603 * @param fFlags The flags.
3604 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3605 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3606 */
3607static VBOXSTRICTRC
3608iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3609 PCPUMCTX pCtx,
3610 uint8_t cbInstr,
3611 uint8_t u8Vector,
3612 uint32_t fFlags,
3613 uint16_t uErr,
3614 uint64_t uCr2)
3615{
3616 NOREF(cbInstr);
3617
3618 /*
3619 * Read the IDT entry.
3620 */
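 /* IDT entries are 16 bytes each in long mode (two descriptor slots), hence
    the shift by 4 and the two 8-byte fetches below. */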
3621 uint16_t offIdt = (uint16_t)u8Vector << 4;
3622 if (pCtx->idtr.cbIdt < offIdt + 7)
3623 {
3624 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3625 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3626 }
3627 X86DESC64 Idte;
3628 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3629 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3630 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3631 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3632 return rcStrict;
3633 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3634 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3635 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3636
3637 /*
3638 * Check the descriptor type, DPL and such.
3639 * ASSUMES this is done in the same order as described for call-gate calls.
3640 */
3641 if (Idte.Gate.u1DescType)
3642 {
3643 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3644 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3645 }
3646 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3647 switch (Idte.Gate.u4Type)
3648 {
3649 case AMD64_SEL_TYPE_SYS_INT_GATE:
3650 fEflToClear |= X86_EFL_IF;
3651 break;
3652 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3653 break;
3654
3655 default:
3656 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3657 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3658 }
3659
3660 /* Check DPL against CPL if applicable. */
3661 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3662 {
3663 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3664 {
3665 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3666 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3667 }
3668 }
3669
3670 /* Is it there? */
3671 if (!Idte.Gate.u1Present)
3672 {
3673 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3674 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3675 }
3676
3677 /* A null CS is bad. */
3678 RTSEL NewCS = Idte.Gate.u16Sel;
3679 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3680 {
3681 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3682 return iemRaiseGeneralProtectionFault0(pIemCpu);
3683 }
3684
3685 /* Fetch the descriptor for the new CS. */
3686 IEMSELDESC DescCS;
3687 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3688 if (rcStrict != VINF_SUCCESS)
3689 {
3690 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3691 return rcStrict;
3692 }
3693
3694 /* Must be a 64-bit code segment. */
3695 if (!DescCS.Long.Gen.u1DescType)
3696 {
3697 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3698 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3699 }
3700 if ( !DescCS.Long.Gen.u1Long
3701 || DescCS.Long.Gen.u1DefBig
3702 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3703 {
3704 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3705 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3706 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3707 }
3708
3709 /* Don't allow lowering the privilege level. For non-conforming CS
3710 selectors, the CS.DPL sets the privilege level the trap/interrupt
3711 handler runs at. For conforming CS selectors, the CPL remains
3712 unchanged, but the CS.DPL must be <= CPL. */
3713 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3714 * when CPU in Ring-0. Result \#GP? */
3715 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3716 {
3717 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3718 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3719 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3720 }
3721
3722
3723 /* Make sure the selector is present. */
3724 if (!DescCS.Legacy.Gen.u1Present)
3725 {
3726 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3727 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3728 }
3729
3730 /* Check that the new RIP is canonical. */
3731 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3732 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3733 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3734 if (!IEM_IS_CANONICAL(uNewRip))
3735 {
3736 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3737 return iemRaiseGeneralProtectionFault0(pIemCpu);
3738 }
3739
3740 /*
3741 * If the privilege level changes or if the IST isn't zero, we need to get
3742 * a new stack from the TSS.
3743 */
3744 uint64_t uNewRsp;
3745 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3746 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3747 if ( uNewCpl != pIemCpu->uCpl
3748 || Idte.Gate.u3IST != 0)
3749 {
3750 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3751 if (rcStrict != VINF_SUCCESS)
3752 return rcStrict;
3753 }
3754 else
3755 uNewRsp = pCtx->rsp;
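 /* In 64-bit mode the CPU aligns the stack to a 16-byte boundary before
    pushing the interrupt/exception frame (see the Intel SDM / AMD APM). */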
3756 uNewRsp &= ~(uint64_t)0xf;
3757
3758 /*
3759 * Calc the flag image to push.
3760 */
3761 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3762 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3763 fEfl &= ~X86_EFL_RF;
3764 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3765 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3766
3767 /*
3768 * Start making changes.
3769 */
3770
3771 /* Create the stack frame. */
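 /* In 64-bit mode SS:RSP is always pushed, so the frame is five quadwords -
    RIP, CS, RFLAGS, RSP and SS - plus an optional error code. */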
3772 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3773 RTPTRUNION uStackFrame;
3774 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3775 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3776 if (rcStrict != VINF_SUCCESS)
3777 return rcStrict;
3778 void * const pvStackFrame = uStackFrame.pv;
3779
3780 if (fFlags & IEM_XCPT_FLAGS_ERR)
3781 *uStackFrame.pu64++ = uErr;
3782 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3783 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3784 uStackFrame.pu64[2] = fEfl;
3785 uStackFrame.pu64[3] = pCtx->rsp;
3786 uStackFrame.pu64[4] = pCtx->ss.Sel;
3787 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3788 if (rcStrict != VINF_SUCCESS)
3789 return rcStrict;
3790
3791 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3792 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3793 * after pushing the stack frame? (Write protect the gdt + stack to
3794 * find out.) */
3795 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3796 {
3797 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3798 if (rcStrict != VINF_SUCCESS)
3799 return rcStrict;
3800 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3801 }
3802
3803 /*
3804 * Start committing the register changes.
3805 */
3806 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3807 * hidden registers when interrupting 32-bit or 16-bit code! */
3808 if (uNewCpl != pIemCpu->uCpl)
3809 {
3810 pCtx->ss.Sel = 0 | uNewCpl;
3811 pCtx->ss.ValidSel = 0 | uNewCpl;
3812 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3813 pCtx->ss.u32Limit = UINT32_MAX;
3814 pCtx->ss.u64Base = 0;
3815 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3816 }
3817 pCtx->rsp = uNewRsp - cbStackFrame;
3818 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3819 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3820 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3821 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3822 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3823 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3824 pCtx->rip = uNewRip;
3825 pIemCpu->uCpl = uNewCpl;
3826
3827 fEfl &= ~fEflToClear;
3828 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3829
3830 if (fFlags & IEM_XCPT_FLAGS_CR2)
3831 pCtx->cr2 = uCr2;
3832
3833 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3834 iemRaiseXcptAdjustState(pCtx, u8Vector);
3835
3836 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3837}
3838
3839
3840/**
3841 * Implements exceptions and interrupts.
3842 *
3843 * All exceptions and interrupts go through this function!
3844 *
3845 * @returns VBox strict status code.
3846 * @param pIemCpu The IEM per CPU instance data.
3847 * @param cbInstr The number of bytes to offset rIP by in the return
3848 * address.
3849 * @param u8Vector The interrupt / exception vector number.
3850 * @param fFlags The flags.
3851 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3852 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3853 */
3854DECL_NO_INLINE(static, VBOXSTRICTRC)
3855iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3856 uint8_t cbInstr,
3857 uint8_t u8Vector,
3858 uint32_t fFlags,
3859 uint16_t uErr,
3860 uint64_t uCr2)
3861{
3862 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3863
3864 /*
3865 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3866 */
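 /* A software INT n executed in V8086 mode with IOPL < 3 raises #GP(0) instead
    of vectoring through the IDT (VME redirection is not considered here). */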
3867 if ( pCtx->eflags.Bits.u1VM
3868 && pCtx->eflags.Bits.u2IOPL != 3
3869 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3870 && (pCtx->cr0 & X86_CR0_PE) )
3871 {
3872 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3873 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3874 u8Vector = X86_XCPT_GP;
3875 uErr = 0;
3876 }
3877#ifdef DBGFTRACE_ENABLED
3878 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3879 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3880 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3881#endif
3882
3883 /*
3884 * Do recursion accounting.
3885 */
3886 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3887 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3888 if (pIemCpu->cXcptRecursions == 0)
3889 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3890 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3891 else
3892 {
3893 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3894 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3895
3896 /** @todo double and triple faults. */
3897 if (pIemCpu->cXcptRecursions >= 3)
3898 {
3899#ifdef DEBUG_bird
3900 AssertFailed();
3901#endif
3902 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3903 }
3904
3905 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3906 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3907 {
3908 ....
3909 } */
3910 }
3911 pIemCpu->cXcptRecursions++;
3912 pIemCpu->uCurXcpt = u8Vector;
3913 pIemCpu->fCurXcpt = fFlags;
3914
3915 /*
3916 * Extensive logging.
3917 */
3918#if defined(LOG_ENABLED) && defined(IN_RING3)
3919 if (LogIs3Enabled())
3920 {
3921 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3922 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3923 char szRegs[4096];
3924 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3925 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3926 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3927 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3928 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3929 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3930 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3931 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3932 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3933 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3934 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3935 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3936 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3937 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3938 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3939 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3940 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3941 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3942 " efer=%016VR{efer}\n"
3943 " pat=%016VR{pat}\n"
3944 " sf_mask=%016VR{sf_mask}\n"
3945 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3946 " lstar=%016VR{lstar}\n"
3947 " star=%016VR{star} cstar=%016VR{cstar}\n"
3948 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3949 );
3950
3951 char szInstr[256];
3952 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3953 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3954 szInstr, sizeof(szInstr), NULL);
3955 Log3(("%s%s\n", szRegs, szInstr));
3956 }
3957#endif /* LOG_ENABLED */
3958
3959 /*
3960 * Call the mode specific worker function.
3961 */
3962 VBOXSTRICTRC rcStrict;
3963 if (!(pCtx->cr0 & X86_CR0_PE))
3964 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3965 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
3966 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3967 else
3968 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3969
3970 /*
3971 * Unwind.
3972 */
3973 pIemCpu->cXcptRecursions--;
3974 pIemCpu->uCurXcpt = uPrevXcpt;
3975 pIemCpu->fCurXcpt = fPrevXcpt;
3976 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
3977 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
3978 return rcStrict;
3979}
3980
3981
3982/** \#DE - 00. */
3983DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
3984{
3985 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3986}
3987
3988
3989/** \#DB - 01.
3990 * @note This automatically clears DR7.GD. */
3991DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
3992{
3993 /** @todo set/clear RF. */
3994 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
3995 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3996}
3997
3998
3999/** \#UD - 06. */
4000DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4001{
4002 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4003}
4004
4005
4006/** \#NM - 07. */
4007DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4008{
4009 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4010}
4011
4012
4013/** \#TS(err) - 0a. */
4014DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4015{
4016 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4017}
4018
4019
4020/** \#TS(tr) - 0a. */
4021DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4022{
4023 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4024 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4025}
4026
4027
4028/** \#TS(0) - 0a. */
4029DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4030{
4031 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4032 0, 0);
4033}
4034
4035
4036 /** \#TS(sel) - 0a. */
4037DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4038{
4039 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4040 uSel & X86_SEL_MASK_OFF_RPL, 0);
4041}
4042
4043
4044/** \#NP(err) - 0b. */
4045DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4046{
4047 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4048}
4049
4050
4051/** \#NP(seg) - 0b. */
4052DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4053{
4054 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4055 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4056}
4057
4058
4059/** \#NP(sel) - 0b. */
4060DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4061{
4062 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4063 uSel & ~X86_SEL_RPL, 0);
4064}
4065
4066
4067 /** \#SS(sel) - 0c. */
4068DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4069{
4070 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4071 uSel & ~X86_SEL_RPL, 0);
4072}
4073
4074
4075/** \#SS(err) - 0c. */
4076DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4077{
4078 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4079}
4080
4081
4082/** \#GP(n) - 0d. */
4083DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4084{
4085 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4086}
4087
4088
4089/** \#GP(0) - 0d. */
4090DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4091{
4092 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4093}
4094
4095
4096/** \#GP(sel) - 0d. */
4097DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4098{
4099 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4100 Sel & ~X86_SEL_RPL, 0);
4101}
4102
4103
4104/** \#GP(0) - 0d. */
4105DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4106{
4107 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4108}
4109
4110
4111/** \#GP(sel) - 0d. */
4112DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4113{
4114 NOREF(iSegReg); NOREF(fAccess);
4115 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4116 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4117}
4118
4119
4120/** \#GP(sel) - 0d. */
4121DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4122{
4123 NOREF(Sel);
4124 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4125}
4126
4127
4128/** \#GP(sel) - 0d. */
4129DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4130{
4131 NOREF(iSegReg); NOREF(fAccess);
4132 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4133}
4134
4135
4136/** \#PF(n) - 0e. */
4137DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4138{
4139 uint16_t uErr;
4140 switch (rc)
4141 {
4142 case VERR_PAGE_NOT_PRESENT:
4143 case VERR_PAGE_TABLE_NOT_PRESENT:
4144 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4145 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4146 uErr = 0;
4147 break;
4148
4149 default:
4150 AssertMsgFailed(("%Rrc\n", rc));
4151 case VERR_ACCESS_DENIED:
4152 uErr = X86_TRAP_PF_P;
4153 break;
4154
4155 /** @todo reserved */
4156 }
4157
4158 if (pIemCpu->uCpl == 3)
4159 uErr |= X86_TRAP_PF_US;
4160
4161 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4162 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4163 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4164 uErr |= X86_TRAP_PF_ID;
4165
4166#if 0 /* This is so much non-sense, really. Why was it done like that? */
4167 /* Note! RW access callers reporting a WRITE protection fault, will clear
4168 the READ flag before calling. So, read-modify-write accesses (RW)
4169 can safely be reported as READ faults. */
4170 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4171 uErr |= X86_TRAP_PF_RW;
4172#else
4173 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4174 {
4175 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4176 uErr |= X86_TRAP_PF_RW;
4177 }
4178#endif
4179
4180 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4181 uErr, GCPtrWhere);
4182}
4183
4184
4185/** \#MF(0) - 10. */
4186DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4187{
4188 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4189}
4190
4191
4192/** \#AC(0) - 11. */
4193DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4194{
4195 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4196}
4197
4198
4199/**
4200 * Macro for calling iemCImplRaiseDivideError().
4201 *
4202 * This enables us to add/remove arguments and force different levels of
4203 * inlining as we wish.
4204 *
4205 * @return Strict VBox status code.
4206 */
4207#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4208IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4209{
4210 NOREF(cbInstr);
4211 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4212}
4213
4214
4215/**
4216 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4217 *
4218 * This enables us to add/remove arguments and force different levels of
4219 * inlining as we wish.
4220 *
4221 * @return Strict VBox status code.
4222 */
4223#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4224IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4225{
4226 NOREF(cbInstr);
4227 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4228}
4229
4230
4231/**
4232 * Macro for calling iemCImplRaiseInvalidOpcode().
4233 *
4234 * This enables us to add/remove arguments and force different levels of
4235 * inlining as we wish.
4236 *
4237 * @return Strict VBox status code.
4238 */
4239#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4240IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4241{
4242 NOREF(cbInstr);
4243 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4244}
4245
4246
4247/** @} */
4248
4249
4250/*
4251 *
4252 * Helper routines.
4253 * Helper routines.
4254 * Helper routines.
4255 *
4256 */
4257
4258/**
4259 * Recalculates the effective operand size.
4260 *
4261 * @param pIemCpu The IEM state.
4262 */
4263static void iemRecalEffOpSize(PIEMCPU pIemCpu)
4264{
4265 switch (pIemCpu->enmCpuMode)
4266 {
4267 case IEMMODE_16BIT:
4268 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4269 break;
4270 case IEMMODE_32BIT:
4271 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4272 break;
4273 case IEMMODE_64BIT:
4274 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4275 {
4276 case 0:
4277 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4278 break;
4279 case IEM_OP_PRF_SIZE_OP:
4280 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4281 break;
4282 case IEM_OP_PRF_SIZE_REX_W:
4283 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4284 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4285 break;
4286 }
4287 break;
4288 default:
4289 AssertFailed();
4290 }
4291}
4292
4293
4294/**
4295 * Sets the default operand size to 64-bit and recalculates the effective
4296 * operand size.
4297 *
4298 * @param pIemCpu The IEM state.
4299 */
4300static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4301{
4302 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4303 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
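 /* REX.W takes precedence over the 66h operand-size prefix: only a lone 66h
    prefix selects 16-bit operands here, everything else yields 64-bit. */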
4304 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4305 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4306 else
4307 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4308}
4309
4310
4311/*
4312 *
4313 * Common opcode decoders.
4314 * Common opcode decoders.
4315 * Common opcode decoders.
4316 *
4317 */
4318//#include <iprt/mem.h>
4319
4320/**
4321 * Used to add extra details about a stub case.
4322 * @param pIemCpu The IEM per CPU state.
4323 */
4324static void iemOpStubMsg2(PIEMCPU pIemCpu)
4325{
4326#if defined(LOG_ENABLED) && defined(IN_RING3)
4327 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4328 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4329 char szRegs[4096];
4330 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4331 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4332 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4333 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4334 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4335 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4336 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4337 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4338 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4339 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4340 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4341 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4342 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4343 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4344 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4345 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4346 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4347 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4348 " efer=%016VR{efer}\n"
4349 " pat=%016VR{pat}\n"
4350 " sf_mask=%016VR{sf_mask}\n"
4351 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4352 " lstar=%016VR{lstar}\n"
4353 " star=%016VR{star} cstar=%016VR{cstar}\n"
4354 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4355 );
4356
4357 char szInstr[256];
4358 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4359 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4360 szInstr, sizeof(szInstr), NULL);
4361
4362 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4363#else
4364 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);
4365#endif
4366}
4367
4368/**
4369 * Complains about a stub.
4370 *
4371 * Providing two versions of this macro, one for daily use and one for use when
4372 * working on IEM.
4373 */
4374#if 0
4375# define IEMOP_BITCH_ABOUT_STUB() \
4376 do { \
4377 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4378 iemOpStubMsg2(pIemCpu); \
4379 RTAssertPanic(); \
4380 } while (0)
4381#else
4382# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4383#endif
4384
4385/** Stubs an opcode. */
4386#define FNIEMOP_STUB(a_Name) \
4387 FNIEMOP_DEF(a_Name) \
4388 { \
4389 IEMOP_BITCH_ABOUT_STUB(); \
4390 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4391 } \
4392 typedef int ignore_semicolon
4393
4394/** Stubs an opcode. */
4395#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4396 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4397 { \
4398 IEMOP_BITCH_ABOUT_STUB(); \
4399 NOREF(a_Name0); \
4400 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4401 } \
4402 typedef int ignore_semicolon
4403
4404/** Stubs an opcode which currently should raise \#UD. */
4405#define FNIEMOP_UD_STUB(a_Name) \
4406 FNIEMOP_DEF(a_Name) \
4407 { \
4408 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4409 return IEMOP_RAISE_INVALID_OPCODE(); \
4410 } \
4411 typedef int ignore_semicolon
4412
4413/** Stubs an opcode which currently should raise \#UD. */
4414#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4415 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4416 { \
4417 NOREF(a_Name0); \
4418 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4419 return IEMOP_RAISE_INVALID_OPCODE(); \
4420 } \
4421 typedef int ignore_semicolon
4422
4423
4424
4425/** @name Register Access.
4426 * @{
4427 */
4428
4429/**
4430 * Gets a reference (pointer) to the specified hidden segment register.
4431 *
4432 * @returns Hidden register reference.
4433 * @param pIemCpu The per CPU data.
4434 * @param iSegReg The segment register.
4435 */
4436static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4437{
4438 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4439 PCPUMSELREG pSReg;
4440 switch (iSegReg)
4441 {
4442 case X86_SREG_ES: pSReg = &pCtx->es; break;
4443 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4444 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4445 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4446 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4447 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4448 default:
4449 AssertFailedReturn(NULL);
4450 }
4451#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4452 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4453 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4454#else
4455 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4456#endif
4457 return pSReg;
4458}
4459
4460
4461/**
4462 * Gets a reference (pointer) to the specified segment register (the selector
4463 * value).
4464 *
4465 * @returns Pointer to the selector variable.
4466 * @param pIemCpu The per CPU data.
4467 * @param iSegReg The segment register.
4468 */
4469static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4470{
4471 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4472 switch (iSegReg)
4473 {
4474 case X86_SREG_ES: return &pCtx->es.Sel;
4475 case X86_SREG_CS: return &pCtx->cs.Sel;
4476 case X86_SREG_SS: return &pCtx->ss.Sel;
4477 case X86_SREG_DS: return &pCtx->ds.Sel;
4478 case X86_SREG_FS: return &pCtx->fs.Sel;
4479 case X86_SREG_GS: return &pCtx->gs.Sel;
4480 }
4481 AssertFailedReturn(NULL);
4482}
4483
4484
4485/**
4486 * Fetches the selector value of a segment register.
4487 *
4488 * @returns The selector value.
4489 * @param pIemCpu The per CPU data.
4490 * @param iSegReg The segment register.
4491 */
4492static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4493{
4494 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4495 switch (iSegReg)
4496 {
4497 case X86_SREG_ES: return pCtx->es.Sel;
4498 case X86_SREG_CS: return pCtx->cs.Sel;
4499 case X86_SREG_SS: return pCtx->ss.Sel;
4500 case X86_SREG_DS: return pCtx->ds.Sel;
4501 case X86_SREG_FS: return pCtx->fs.Sel;
4502 case X86_SREG_GS: return pCtx->gs.Sel;
4503 }
4504 AssertFailedReturn(0xffff);
4505}
4506
4507
4508/**
4509 * Gets a reference (pointer) to the specified general register.
4510 *
4511 * @returns Register reference.
4512 * @param pIemCpu The per CPU data.
4513 * @param iReg The general register.
4514 */
4515static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4516{
4517 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4518 switch (iReg)
4519 {
4520 case X86_GREG_xAX: return &pCtx->rax;
4521 case X86_GREG_xCX: return &pCtx->rcx;
4522 case X86_GREG_xDX: return &pCtx->rdx;
4523 case X86_GREG_xBX: return &pCtx->rbx;
4524 case X86_GREG_xSP: return &pCtx->rsp;
4525 case X86_GREG_xBP: return &pCtx->rbp;
4526 case X86_GREG_xSI: return &pCtx->rsi;
4527 case X86_GREG_xDI: return &pCtx->rdi;
4528 case X86_GREG_x8: return &pCtx->r8;
4529 case X86_GREG_x9: return &pCtx->r9;
4530 case X86_GREG_x10: return &pCtx->r10;
4531 case X86_GREG_x11: return &pCtx->r11;
4532 case X86_GREG_x12: return &pCtx->r12;
4533 case X86_GREG_x13: return &pCtx->r13;
4534 case X86_GREG_x14: return &pCtx->r14;
4535 case X86_GREG_x15: return &pCtx->r15;
4536 }
4537 AssertFailedReturn(NULL);
4538}
4539
4540
4541/**
4542 * Gets a reference (pointer) to the specified 8-bit general register.
4543 *
4544 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4545 *
4546 * @returns Register reference.
4547 * @param pIemCpu The per CPU data.
4548 * @param iReg The register.
4549 */
4550static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4551{
4552 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4553 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4554
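 /* Without a REX prefix, encodings 4 thru 7 select the legacy high-byte
    registers AH, CH, DH and BH, i.e. byte 1 of rAX/rCX/rDX/rBX. */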
4555 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4556 if (iReg >= 4)
4557 pu8Reg++;
4558 return pu8Reg;
4559}
4560
4561
4562/**
4563 * Fetches the value of an 8-bit general register.
4564 *
4565 * @returns The register value.
4566 * @param pIemCpu The per CPU data.
4567 * @param iReg The register.
4568 */
4569static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4570{
4571 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4572 return *pbSrc;
4573}
4574
4575
4576/**
4577 * Fetches the value of a 16-bit general register.
4578 *
4579 * @returns The register value.
4580 * @param pIemCpu The per CPU data.
4581 * @param iReg The register.
4582 */
4583static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4584{
4585 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4586}
4587
4588
4589/**
4590 * Fetches the value of a 32-bit general register.
4591 *
4592 * @returns The register value.
4593 * @param pIemCpu The per CPU data.
4594 * @param iReg The register.
4595 */
4596static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4597{
4598 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4599}
4600
4601
4602/**
4603 * Fetches the value of a 64-bit general register.
4604 *
4605 * @returns The register value.
4606 * @param pIemCpu The per CPU data.
4607 * @param iReg The register.
4608 */
4609static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4610{
4611 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4612}
4613
4614
4615/**
4616 * Checks whether the FPU state is in FXSAVE format.
4617 *
4618 * @returns true if it is, false if it's in FNSAVE format.
4619 * @param pIemCpu The IEM per CPU data.
4620 */
4621DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
4622{
4623#ifdef RT_ARCH_AMD64
4624 NOREF(pIemCpu);
4625 return true;
4626#else
4627 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
4628 return true;
4629#endif
4630}
4631
4632
4633/**
4634 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4635 *
4636 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4637 * segment limit.
4638 *
4639 * @param pIemCpu The per CPU data.
4640 * @param offNextInstr The offset of the next instruction.
4641 */
4642static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4643{
4644 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4645 switch (pIemCpu->enmEffOpSize)
4646 {
4647 case IEMMODE_16BIT:
4648 {
4649 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4650 if ( uNewIp > pCtx->cs.u32Limit
4651 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4652 return iemRaiseGeneralProtectionFault0(pIemCpu);
4653 pCtx->rip = uNewIp;
4654 break;
4655 }
4656
4657 case IEMMODE_32BIT:
4658 {
4659 Assert(pCtx->rip <= UINT32_MAX);
4660 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4661
4662 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4663 if (uNewEip > pCtx->cs.u32Limit)
4664 return iemRaiseGeneralProtectionFault0(pIemCpu);
4665 pCtx->rip = uNewEip;
4666 break;
4667 }
4668
4669 case IEMMODE_64BIT:
4670 {
4671 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4672
4673 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4674 if (!IEM_IS_CANONICAL(uNewRip))
4675 return iemRaiseGeneralProtectionFault0(pIemCpu);
4676 pCtx->rip = uNewRip;
4677 break;
4678 }
4679
4680 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4681 }
4682
4683 pCtx->eflags.Bits.u1RF = 0;
4684 return VINF_SUCCESS;
4685}
4686
4687
4688/**
4689 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4690 *
4691 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4692 * segment limit.
4693 *
4694 * @returns Strict VBox status code.
4695 * @param pIemCpu The per CPU data.
4696 * @param offNextInstr The offset of the next instruction.
4697 */
4698static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4699{
4700 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4701 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4702
4703 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4704 if ( uNewIp > pCtx->cs.u32Limit
4705 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4706 return iemRaiseGeneralProtectionFault0(pIemCpu);
4707 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4708 pCtx->rip = uNewIp;
4709 pCtx->eflags.Bits.u1RF = 0;
4710
4711 return VINF_SUCCESS;
4712}
4713
4714
4715/**
4716 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4717 *
4718 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4719 * segment limit.
4720 *
4721 * @returns Strict VBox status code.
4722 * @param pIemCpu The per CPU data.
4723 * @param offNextInstr The offset of the next instruction.
4724 */
4725static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4726{
4727 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4728 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4729
4730 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4731 {
4732 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4733
4734 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4735 if (uNewEip > pCtx->cs.u32Limit)
4736 return iemRaiseGeneralProtectionFault0(pIemCpu);
4737 pCtx->rip = uNewEip;
4738 }
4739 else
4740 {
4741 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4742
4743 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4744 if (!IEM_IS_CANONICAL(uNewRip))
4745 return iemRaiseGeneralProtectionFault0(pIemCpu);
4746 pCtx->rip = uNewRip;
4747 }
4748 pCtx->eflags.Bits.u1RF = 0;
4749 return VINF_SUCCESS;
4750}
4751
4752
4753/**
4754 * Performs a near jump to the specified address.
4755 *
4756 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4757 * segment limit.
4758 *
4759 * @param pIemCpu The per CPU data.
4760 * @param uNewRip The new RIP value.
4761 */
4762static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4763{
4764 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4765 switch (pIemCpu->enmEffOpSize)
4766 {
4767 case IEMMODE_16BIT:
4768 {
4769 Assert(uNewRip <= UINT16_MAX);
4770 if ( uNewRip > pCtx->cs.u32Limit
4771 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4772 return iemRaiseGeneralProtectionFault0(pIemCpu);
4773 /** @todo Test 16-bit jump in 64-bit mode. */
4774 pCtx->rip = uNewRip;
4775 break;
4776 }
4777
4778 case IEMMODE_32BIT:
4779 {
4780 Assert(uNewRip <= UINT32_MAX);
4781 Assert(pCtx->rip <= UINT32_MAX);
4782 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4783
4784 if (uNewRip > pCtx->cs.u32Limit)
4785 return iemRaiseGeneralProtectionFault0(pIemCpu);
4786 pCtx->rip = uNewRip;
4787 break;
4788 }
4789
4790 case IEMMODE_64BIT:
4791 {
4792 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4793
4794 if (!IEM_IS_CANONICAL(uNewRip))
4795 return iemRaiseGeneralProtectionFault0(pIemCpu);
4796 pCtx->rip = uNewRip;
4797 break;
4798 }
4799
4800 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4801 }
4802
4803 pCtx->eflags.Bits.u1RF = 0;
4804 return VINF_SUCCESS;
4805}
4806
4807
4808/**
4809 * Gets the address of the top of the stack.
4810 *
4811 * @param pIemCpu The per CPU data.
4812 * @param pCtx The CPU context from which SP/ESP/RSP should
4813 * be read.
4814 */
4815DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4816{
4817 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4818 return pCtx->rsp;
4819 if (pCtx->ss.Attr.n.u1DefBig)
4820 return pCtx->esp;
4821 return pCtx->sp;
4822}
4823
4824
4825/**
4826 * Updates the RIP/EIP/IP to point to the next instruction.
4827 *
4828 * This function leaves the EFLAGS.RF flag alone.
4829 *
4830 * @param pIemCpu The per CPU data.
4831 * @param cbInstr The number of bytes to add.
4832 */
4833static void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4834{
4835 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4836 switch (pIemCpu->enmCpuMode)
4837 {
4838 case IEMMODE_16BIT:
4839 Assert(pCtx->rip <= UINT16_MAX);
4840 pCtx->eip += cbInstr;
4841 pCtx->eip &= UINT32_C(0xffff);
4842 break;
4843
4844 case IEMMODE_32BIT:
4845 pCtx->eip += cbInstr;
4846 Assert(pCtx->rip <= UINT32_MAX);
4847 break;
4848
4849 case IEMMODE_64BIT:
4850 pCtx->rip += cbInstr;
4851 break;
4852 default: AssertFailed();
4853 }
4854}
4855
4856
4857#if 0
4858/**
4859 * Updates the RIP/EIP/IP to point to the next instruction.
4860 *
4861 * @param pIemCpu The per CPU data.
4862 */
4863static void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4864{
4865 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4866}
4867#endif
4868
4869
4870
4871/**
4872 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4873 *
4874 * @param pIemCpu The per CPU data.
4875 * @param cbInstr The number of bytes to add.
4876 */
4877static void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4878{
4879 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4880
4881 pCtx->eflags.Bits.u1RF = 0;
4882
4883 switch (pIemCpu->enmCpuMode)
4884 {
4885 case IEMMODE_16BIT:
4886 Assert(pCtx->rip <= UINT16_MAX);
4887 pCtx->eip += cbInstr;
4888 pCtx->eip &= UINT32_C(0xffff);
4889 break;
4890
4891 case IEMMODE_32BIT:
4892 pCtx->eip += cbInstr;
4893 Assert(pCtx->rip <= UINT32_MAX);
4894 break;
4895
4896 case IEMMODE_64BIT:
4897 pCtx->rip += cbInstr;
4898 break;
4899 default: AssertFailed();
4900 }
4901}
4902
4903
4904/**
4905 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4906 *
4907 * @param pIemCpu The per CPU data.
4908 */
4909static void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4910{
4911 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4912}
4913
4914
4915/**
4916 * Adds to the stack pointer.
4917 *
4918 * @param pIemCpu The per CPU data.
4919 * @param pCtx The CPU context in which SP/ESP/RSP should
4920 * be updated.
4921 * @param cbToAdd The number of bytes to add.
4922 */
4923DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4924{
4925 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4926 pCtx->rsp += cbToAdd;
4927 else if (pCtx->ss.Attr.n.u1DefBig)
4928 pCtx->esp += cbToAdd;
4929 else
4930 pCtx->sp += cbToAdd;
4931}
4932
4933
4934/**
4935 * Subtracts from the stack pointer.
4936 *
4937 * @param pIemCpu The per CPU data.
4938 * @param pCtx The CPU context in which SP/ESP/RSP should
4939 * be updated.
4940 * @param cbToSub The number of bytes to subtract.
4941 */
4942DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4943{
4944 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4945 pCtx->rsp -= cbToSub;
4946 else if (pCtx->ss.Attr.n.u1DefBig)
4947 pCtx->esp -= cbToSub;
4948 else
4949 pCtx->sp -= cbToSub;
4950}
4951
4952
4953/**
4954 * Adds to the temporary stack pointer.
4955 *
4956 * @param pIemCpu The per CPU data.
4957 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4958 * @param cbToAdd The number of bytes to add.
4959 * @param pCtx Where to get the current stack mode.
4960 */
4961DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4962{
4963 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4964 pTmpRsp->u += cbToAdd;
4965 else if (pCtx->ss.Attr.n.u1DefBig)
4966 pTmpRsp->DWords.dw0 += cbToAdd;
4967 else
4968 pTmpRsp->Words.w0 += cbToAdd;
4969}
4970
4971
4972/**
4973 * Subtracts from the temporary stack pointer.
4974 *
4975 * @param pIemCpu The per CPU data.
4976 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4977 * @param cbToSub The number of bytes to subtract.
4978 * @param pCtx Where to get the current stack mode.
4979 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
4980 * expecting that.
4981 */
4982DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
4983{
4984 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4985 pTmpRsp->u -= cbToSub;
4986 else if (pCtx->ss.Attr.n.u1DefBig)
4987 pTmpRsp->DWords.dw0 -= cbToSub;
4988 else
4989 pTmpRsp->Words.w0 -= cbToSub;
4990}
4991
4992
4993/**
4994 * Calculates the effective stack address for a push of the specified size as
4995 * well as the new RSP value (upper bits may be masked).
4996 *
4997 * @returns Effective stack address for the push.
4998 * @param pIemCpu The IEM per CPU data.
4999 * @param pCtx Where to get the current stack mode.
5000 * @param cbItem The size of the stack item to push.
5001 * @param puNewRsp Where to return the new RSP value.
5002 */
5003DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5004{
5005 RTUINT64U uTmpRsp;
5006 RTGCPTR GCPtrTop;
5007 uTmpRsp.u = pCtx->rsp;
5008
5009 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5010 GCPtrTop = uTmpRsp.u -= cbItem;
5011 else if (pCtx->ss.Attr.n.u1DefBig)
5012 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5013 else
5014 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5015 *puNewRsp = uTmpRsp.u;
5016 return GCPtrTop;
5017}
5018
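/*
 * Worked example for the push address calculation above (illustration only):
 *
 *   - 64-bit mode:           RSP=0x00007fffffffe010, cbItem=8
 *       GCPtrTop = *puNewRsp = 0x00007fffffffe008 (full 64-bit arithmetic).
 *   - 32-bit stack (SS.B=1): ESP=0x00001000, cbItem=4
 *       only the low 32 bits are decremented: GCPtrTop = 0x00000ffc.
 *   - 16-bit stack (SS.B=0): SP=0x0002, cbItem=4
 *       only the low 16 bits are decremented and wrap: GCPtrTop = 0xfffe,
 *       while the upper bits of the returned RSP value are left untouched.
 */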
5019
5020/**
5021 * Gets the current stack pointer and calculates the value after a pop of the
5022 * specified size.
5023 *
5024 * @returns Current stack pointer.
5025 * @param pIemCpu The per CPU data.
5026 * @param pCtx Where to get the current stack mode.
5027 * @param cbItem The size of the stack item to pop.
5028 * @param puNewRsp Where to return the new RSP value.
5029 */
5030DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5031{
5032 RTUINT64U uTmpRsp;
5033 RTGCPTR GCPtrTop;
5034 uTmpRsp.u = pCtx->rsp;
5035
5036 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5037 {
5038 GCPtrTop = uTmpRsp.u;
5039 uTmpRsp.u += cbItem;
5040 }
5041 else if (pCtx->ss.Attr.n.u1DefBig)
5042 {
5043 GCPtrTop = uTmpRsp.DWords.dw0;
5044 uTmpRsp.DWords.dw0 += cbItem;
5045 }
5046 else
5047 {
5048 GCPtrTop = uTmpRsp.Words.w0;
5049 uTmpRsp.Words.w0 += cbItem;
5050 }
5051 *puNewRsp = uTmpRsp.u;
5052 return GCPtrTop;
5053}
5054
5055
5056/**
5057 * Calculates the effective stack address for a push of the specified size as
5058 * well as the new temporary RSP value (upper bits may be masked).
5059 *
5060 * @returns Effective stack address for the push.
5061 * @param pIemCpu The per CPU data.
5062 * @param pCtx Where to get the current stack mode.
5063 * @param pTmpRsp The temporary stack pointer. This is updated.
5064 * @param cbItem The size of the stack item to push.
5065 */
5066DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5067{
5068 RTGCPTR GCPtrTop;
5069
5070 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5071 GCPtrTop = pTmpRsp->u -= cbItem;
5072 else if (pCtx->ss.Attr.n.u1DefBig)
5073 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5074 else
5075 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5076 return GCPtrTop;
5077}
5078
5079
5080/**
5081 * Gets the effective stack address for a pop of the specified size and
5082 * calculates and updates the temporary RSP.
5083 *
5084 * @returns Current stack pointer.
5085 * @param pIemCpu The per CPU data.
5086 * @param pTmpRsp The temporary stack pointer. This is updated.
5087 * @param pCtx Where to get the current stack mode.
5088 * @param cbItem The size of the stack item to pop.
5089 */
5090DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5091{
5092 RTGCPTR GCPtrTop;
5093 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5094 {
5095 GCPtrTop = pTmpRsp->u;
5096 pTmpRsp->u += cbItem;
5097 }
5098 else if (pCtx->ss.Attr.n.u1DefBig)
5099 {
5100 GCPtrTop = pTmpRsp->DWords.dw0;
5101 pTmpRsp->DWords.dw0 += cbItem;
5102 }
5103 else
5104 {
5105 GCPtrTop = pTmpRsp->Words.w0;
5106 pTmpRsp->Words.w0 += cbItem;
5107 }
5108 return GCPtrTop;
5109}
5110
5111
5112/**
5113 * Checks if an Intel CPUID feature bit is set.
5114 *
5115 * @returns true / false.
5116 *
5117 * @param pIemCpu The IEM per CPU data.
5118 * @param fEdx The EDX bit to test, or 0 if ECX.
5119 * @param fEcx The ECX bit to test, or 0 if EDX.
5120 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
5121 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
5122 */
5123static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
5124{
5125 uint32_t uEax, uEbx, uEcx, uEdx;
5126 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
5127 return (fEcx && (uEcx & fEcx))
5128 || (fEdx && (uEdx & fEdx));
5129}
5130
5131
5132/**
5133 * Checks if an AMD CPUID feature bit is set.
5134 *
5135 * @returns true / false.
5136 *
5137 * @param pIemCpu The IEM per CPU data.
5138 * @param fEdx The EDX bit to test, or 0 if ECX.
5139 * @param fEcx The ECX bit to test, or 0 if EDX.
5140 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
5141 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
5142 */
5143static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
5144{
5145 uint32_t uEax, uEbx, uEcx, uEdx;
5146 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
5147 return (fEcx && (uEcx & fEcx))
5148 || (fEdx && (uEdx & fEdx));
5149}
5150
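/*
 * Typical usage sketch for the two CPUID helpers above (hypothetical call
 * sites; the IEM_IS_*_CPUID_FEATURE_PRESENT_* wrappers are the ones named in
 * the remarks, and X86_CPUID_FEATURE_EDX_FXSR / X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
 * are assumed to come from iprt/x86.h):
 *
 *   if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
 *       ... the guest reports FXSAVE/FXRSTOR support in leaf 0x00000001 ...
 *   if (IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
 *       ... the guest reports long mode in leaf 0x80000001 ...
 */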
5151/** @} */
5152
5153
5154/** @name FPU access and helpers.
5155 *
5156 * @{
5157 */
5158
5159
5160/**
5161 * Hook for preparing to use the host FPU.
5162 *
5163 * This is necessary in ring-0 and raw-mode context.
5164 *
5165 * @param pIemCpu The IEM per CPU data.
5166 */
5167DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5168{
5169#ifdef IN_RING3
5170 NOREF(pIemCpu);
5171#else
5172/** @todo RZ: FIXME */
5173//# error "Implement me"
5174#endif
5175}
5176
5177
5178/**
5179 * Hook for preparing to use the host FPU for SSE
5180 *
5181 * This is necessary in ring-0 and raw-mode context.
5182 *
5183 * @param pIemCpu The IEM per CPU data.
5184 */
5185DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5186{
5187 iemFpuPrepareUsage(pIemCpu);
5188}
5189
5190
5191/**
5192 * Stores a QNaN value into a FPU register.
5193 *
5194 * @param pReg Pointer to the register.
5195 */
5196DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5197{
5198 pReg->au32[0] = UINT32_C(0x00000000);
5199 pReg->au32[1] = UINT32_C(0xc0000000);
5200 pReg->au16[4] = UINT16_C(0xffff);
5201}
5202
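/*
 * Bit layout note for the QNaN stored above (the three assignments spelled
 * out as one 80-bit value):
 *
 *   sign/exponent (au16[4]) = 0xffff  ->  sign=1, exponent=0x7fff
 *   mantissa                = 0xc000000000000000 (integer bit + top fraction bit)
 *
 * i.e. the "real indefinite" QNaN that masked invalid operations produce.
 */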
5203
5204/**
5205 * Updates the FOP, FPU.CS and FPUIP registers.
5206 *
5207 * @param pIemCpu The IEM per CPU data.
5208 * @param pCtx The CPU context.
5209 */
5210DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
5211{
5212 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5213 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5214 /** @todo FPU.CS and FPUIP need to be kept separately. */
5215 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5216 {
5217 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5218 * happens in real mode here based on the fnsave and fnstenv images. */
5219 pCtx->fpu.CS = 0;
5220 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5221 }
5222 else
5223 {
5224 pCtx->fpu.CS = pCtx->cs.Sel;
5225 pCtx->fpu.FPUIP = pCtx->rip;
5226 }
5227}
5228
5229
5230/**
5231 * Updates the FPU.DS and FPUDP registers.
5232 *
5233 * @param pIemCpu The IEM per CPU data.
5234 * @param pCtx The CPU context.
5235 * @param iEffSeg The effective segment register.
5236 * @param GCPtrEff The effective address relative to @a iEffSeg.
5237 */
5238DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5239{
5240 RTSEL sel;
5241 switch (iEffSeg)
5242 {
5243 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5244 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5245 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5246 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5247 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5248 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5249 default:
5250 AssertMsgFailed(("%d\n", iEffSeg));
5251 sel = pCtx->ds.Sel;
5252 }
5253 /** @todo FPU.DS and FPUDP need to be kept separately. */
5254 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5255 {
5256 pCtx->fpu.DS = 0;
5257 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5258 }
5259 else
5260 {
5261 pCtx->fpu.DS = sel;
5262 pCtx->fpu.FPUDP = GCPtrEff;
5263 }
5264}
5265
5266
5267/**
5268 * Rotates the stack registers in the push direction.
5269 *
5270 * @param pCtx The CPU context.
5271 * @remarks This is a complete waste of time, but fxsave stores the registers in
5272 * stack order.
5273 */
5274DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
5275{
5276 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
5277 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
5278 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
5279 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
5280 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
5281 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
5282 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
5283 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
5284 pCtx->fpu.aRegs[0].r80 = r80Tmp;
5285}
5286
5287
5288/**
5289 * Rotates the stack registers in the pop direction.
5290 *
5291 * @param pCtx The CPU context.
5292 * @remarks This is a complete waste of time, but fxsave stores the registers in
5293 * stack order.
5294 */
5295DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
5296{
5297 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
5298 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
5299 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
5300 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
5301 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
5302 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
5303 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
5304 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
5305 pCtx->fpu.aRegs[7].r80 = r80Tmp;
5306}
5307
5308
5309/**
5310 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5311 * exception prevents it.
5312 *
5313 * @param pIemCpu The IEM per CPU data.
5314 * @param pResult The FPU operation result to push.
5315 * @param pCtx The CPU context.
5316 */
5317static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
5318{
5319 /* Update FSW and bail if there are pending exceptions afterwards. */
5320 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
5321 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5322 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5323 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5324 {
5325 pCtx->fpu.FSW = fFsw;
5326 return;
5327 }
5328
5329 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5330 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
5331 {
5332 /* All is fine, push the actual value. */
5333 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5334 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
5335 }
5336 else if (pCtx->fpu.FCW & X86_FCW_IM)
5337 {
5338 /* Masked stack overflow, push QNaN. */
5339 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5340 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5341 }
5342 else
5343 {
5344 /* Raise stack overflow, don't push anything. */
5345 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5346 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5347 return;
5348 }
5349
5350 fFsw &= ~X86_FSW_TOP_MASK;
5351 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5352 pCtx->fpu.FSW = fFsw;
5353
5354 iemFpuRotateStackPush(pCtx);
5355}
5356
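/*
 * Worked example for iemFpuMaybePushResult (illustration only; it relies on
 * aRegs[] being kept in stack order, i.e. aRegs[0] is always ST(0)):
 *
 *   Before: TOP=5, so ST(0) lives in physical register R5.
 *   Push:   iNewTop = (5 + 7) & 7 = 4; FTW bit 4 is tested/set because FTW is
 *           indexed by physical register, not by stack position.
 *   The new value is parked in aRegs[7] and iemFpuRotateStackPush then makes
 *   it aRegs[0], so it becomes the new ST(0) while the old ST(i) become ST(i+1).
 */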
5357
5358/**
5359 * Stores a result in a FPU register and updates the FSW and FTW.
5360 *
5361 * @param pIemCpu The IEM per CPU data.
5362 * @param pResult The result to store.
5363 * @param iStReg Which FPU register to store it in.
5364 * @param pCtx The CPU context.
5365 */
5366static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
5367{
5368 Assert(iStReg < 8);
5369 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5370 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5371 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5372 pCtx->fpu.FTW |= RT_BIT(iReg);
5373 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
5374}
5375
5376
5377/**
5378 * Only updates the FPU status word (FSW) with the result of the current
5379 * instruction.
5380 *
5381 * @param pCtx The CPU context.
5382 * @param u16FSW The FSW output of the current instruction.
5383 */
5384static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
5385{
5386 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5387 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5388}
5389
5390
5391/**
5392 * Pops one item off the FPU stack if no pending exception prevents it.
5393 *
5394 * @param pCtx The CPU context.
5395 */
5396static void iemFpuMaybePopOne(PCPUMCTX pCtx)
5397{
5398 /* Check pending exceptions. */
5399 uint16_t uFSW = pCtx->fpu.FSW;
5400 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5401 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5402 return;
5403
5404 /* TOP = TOP + 1 (mod 8), i.e. pop. */
5405 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5406 uFSW &= ~X86_FSW_TOP_MASK;
5407 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5408 pCtx->fpu.FSW = uFSW;
5409
5410 /* Mark the previous ST0 as empty. */
5411 iOldTop >>= X86_FSW_TOP_SHIFT;
5412 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
5413
5414 /* Rotate the registers. */
5415 iemFpuRotateStackPop(pCtx);
5416}
5417
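/*
 * Note on the TOP arithmetic used here and in the push paths: TOP is a 3-bit
 * field, so after masking any added constant only matters modulo 8.
 *
 *   push:  (TOP + 7) & 7  ==  TOP - 1 (mod 8)   e.g. TOP=0 -> 7
 *   pop:   (TOP + 9) & 7  ==  TOP + 1 (mod 8)   e.g. TOP=7 -> 0
 *
 * (Here the addition is applied to the still-shifted field, hence the
 * << X86_FSW_TOP_SHIFT in the constant.)
 */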
5418
5419/**
5420 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5421 *
5422 * @param pIemCpu The IEM per CPU data.
5423 * @param pResult The FPU operation result to push.
5424 */
5425static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5426{
5427 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5428 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5429 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
5430}
5431
5432
5433/**
5434 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5435 * and sets FPUDP and FPUDS.
5436 *
5437 * @param pIemCpu The IEM per CPU data.
5438 * @param pResult The FPU operation result to push.
5439 * @param iEffSeg The effective segment register.
5440 * @param GCPtrEff The effective address relative to @a iEffSeg.
5441 */
5442static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5443{
5444 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5445 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5446 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5447 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
5448}
5449
5450
5451/**
5452 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5453 * unless a pending exception prevents it.
5454 *
5455 * @param pIemCpu The IEM per CPU data.
5456 * @param pResult The FPU operation result to store and push.
5457 */
5458static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5459{
5460 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5461 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5462
5463 /* Update FSW and bail if there are pending exceptions afterwards. */
5464 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
5465 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5466 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5467 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5468 {
5469 pCtx->fpu.FSW = fFsw;
5470 return;
5471 }
5472
5473 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5474 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
5475 {
5476 /* All is fine, push the actual value. */
5477 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5478 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
5479 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
5480 }
5481 else if (pCtx->fpu.FCW & X86_FCW_IM)
5482 {
5483 /* Masked stack overflow, push QNaN. */
5484 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5485 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
5486 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5487 }
5488 else
5489 {
5490 /* Raise stack overflow, don't push anything. */
5491 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5492 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5493 return;
5494 }
5495
5496 fFsw &= ~X86_FSW_TOP_MASK;
5497 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5498 pCtx->fpu.FSW = fFsw;
5499
5500 iemFpuRotateStackPush(pCtx);
5501}
5502
5503
5504/**
5505 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5506 * FOP.
5507 *
5508 * @param pIemCpu The IEM per CPU data.
5509 * @param pResult The result to store.
5510 * @param iStReg Which FPU register to store it in.
5512 */
5513static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5514{
5515 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5516 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5517 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5518}
5519
5520
5521/**
5522 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5523 * FOP, and then pops the stack.
5524 *
5525 * @param pIemCpu The IEM per CPU data.
5526 * @param pResult The result to store.
5527 * @param iStReg Which FPU register to store it in.
5529 */
5530static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5531{
5532 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5533 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5534 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5535 iemFpuMaybePopOne(pCtx);
5536}
5537
5538
5539/**
5540 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5541 * FPUDP, and FPUDS.
5542 *
5543 * @param pIemCpu The IEM per CPU data.
5544 * @param pResult The result to store.
5545 * @param iStReg Which FPU register to store it in.
5547 * @param iEffSeg The effective memory operand selector register.
5548 * @param GCPtrEff The effective memory operand offset.
5549 */
5550static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5551{
5552 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5553 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
5554 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5555 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5556}
5557
5558
5559/**
5560 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5561 * FPUDP, and FPUDS, and then pops the stack.
5562 *
5563 * @param pIemCpu The IEM per CPU data.
5564 * @param pResult The result to store.
5565 * @param iStReg Which FPU register to store it in.
5567 * @param iEffSeg The effective memory operand selector register.
5568 * @param GCPtrEff The effective memory operand offset.
5569 */
5570static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5571 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5572{
5573 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5574 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5575 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5576 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5577 iemFpuMaybePopOne(pCtx);
5578}
5579
5580
5581/**
5582 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5583 *
5584 * @param pIemCpu The IEM per CPU data.
5585 */
5586static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5587{
5588 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
5589}
5590
5591
5592/**
5593 * Marks the specified stack register as free (for FFREE).
5594 *
5595 * @param pIemCpu The IEM per CPU data.
5596 * @param iStReg The register to free.
5597 */
5598static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5599{
5600 Assert(iStReg < 8);
5601 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5602 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5603 pCtx->fpu.FTW &= ~RT_BIT(iReg);
5604}
5605
5606
5607/**
5608 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5609 *
5610 * @param pIemCpu The IEM per CPU data.
5611 */
5612static void iemFpuStackIncTop(PIEMCPU pIemCpu)
5613{
5614 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5615 uint16_t uFsw = pCtx->fpu.FSW;
5616 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5617 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5618 uFsw &= ~X86_FSW_TOP_MASK;
5619 uFsw |= uTop;
5620 pCtx->fpu.FSW = uFsw;
5621}
5622
5623
5624/**
5625 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5626 *
5627 * @param pIemCpu The IEM per CPU data.
5628 */
5629static void iemFpuStackDecTop(PIEMCPU pIemCpu)
5630{
5631 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5632 uint16_t uFsw = pCtx->fpu.FSW;
5633 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5634 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5635 uFsw &= ~X86_FSW_TOP_MASK;
5636 uFsw |= uTop;
5637 pCtx->fpu.FSW = uFsw;
5638}
5639
5640
5641/**
5642 * Updates the FSW, FOP, FPUIP, and FPUCS.
5643 *
5644 * @param pIemCpu The IEM per CPU data.
5645 * @param u16FSW The FSW from the current instruction.
5646 */
5647static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5648{
5649 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5650 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5651 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5652}
5653
5654
5655/**
5656 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5657 *
5658 * @param pIemCpu The IEM per CPU data.
5659 * @param u16FSW The FSW from the current instruction.
5660 */
5661static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5662{
5663 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5664 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5665 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5666 iemFpuMaybePopOne(pCtx);
5667}
5668
5669
5670/**
5671 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5672 *
5673 * @param pIemCpu The IEM per CPU data.
5674 * @param u16FSW The FSW from the current instruction.
5675 * @param iEffSeg The effective memory operand selector register.
5676 * @param GCPtrEff The effective memory operand offset.
5677 */
5678static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5679{
5680 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5681 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5682 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5683 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5684}
5685
5686
5687/**
5688 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5689 *
5690 * @param pIemCpu The IEM per CPU data.
5691 * @param u16FSW The FSW from the current instruction.
5692 */
5693static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5694{
5695 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5696 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5697 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5698 iemFpuMaybePopOne(pCtx);
5699 iemFpuMaybePopOne(pCtx);
5700}
5701
5702
5703/**
5704 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5705 *
5706 * @param pIemCpu The IEM per CPU data.
5707 * @param u16FSW The FSW from the current instruction.
5708 * @param iEffSeg The effective memory operand selector register.
5709 * @param GCPtrEff The effective memory operand offset.
5710 */
5711static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5712{
5713 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5714 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5715 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5716 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5717 iemFpuMaybePopOne(pCtx);
5718}
5719
5720
5721/**
5722 * Worker routine for raising an FPU stack underflow exception.
5723 *
5724 * @param pIemCpu The IEM per CPU data.
5725 * @param iStReg The stack register being accessed.
5726 * @param pCtx The CPU context.
5727 */
5728static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
5729{
5730 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5731 if (pCtx->fpu.FCW & X86_FCW_IM)
5732 {
5733 /* Masked underflow. */
5734 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5735 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5736 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5737 if (iStReg != UINT8_MAX)
5738 {
5739 pCtx->fpu.FTW |= RT_BIT(iReg);
5740 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
5741 }
5742 }
5743 else
5744 {
5745 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5746 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5747 }
5748}
5749
5750
5751/**
5752 * Raises a FPU stack underflow exception.
5753 *
5754 * @param pIemCpu The IEM per CPU data.
5755 * @param iStReg The destination register that should be loaded
5756 * with QNaN if \#IS is not masked. Specify
5757 * UINT8_MAX if none (like for fcom).
5758 */
5759DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5760{
5761 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5762 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5763 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5764}
5765
5766
5767DECL_NO_INLINE(static, void)
5768iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5769{
5770 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5771 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5772 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5773 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5774}
5775
5776
5777DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5778{
5779 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5780 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5781 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5782 iemFpuMaybePopOne(pCtx);
5783}
5784
5785
5786DECL_NO_INLINE(static, void)
5787iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5788{
5789 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5790 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5791 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5792 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5793 iemFpuMaybePopOne(pCtx);
5794}
5795
5796
5797DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5798{
5799 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5800 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5801 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
5802 iemFpuMaybePopOne(pCtx);
5803 iemFpuMaybePopOne(pCtx);
5804}
5805
5806
5807DECL_NO_INLINE(static, void)
5808iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5809{
5810 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5811 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5812
5813 if (pCtx->fpu.FCW & X86_FCW_IM)
5814 {
5815 /* Masked underflow - Push QNaN. */
5816 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
5817 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5818 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5819 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5820 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5821 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5822 iemFpuRotateStackPush(pCtx);
5823 }
5824 else
5825 {
5826 /* Exception pending - don't change TOP or the register stack. */
5827 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5828 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5829 }
5830}
5831
5832
5833DECL_NO_INLINE(static, void)
5834iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5835{
5836 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5837 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5838
5839 if (pCtx->fpu.FCW & X86_FCW_IM)
5840 {
5841 /* Masked underflow - Push QNaN. */
5842 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
5843 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5844 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5845 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5846 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5847 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
5848 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5849 iemFpuRotateStackPush(pCtx);
5850 }
5851 else
5852 {
5853 /* Exception pending - don't change TOP or the register stack. */
5854 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5855 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5856 }
5857}
5858
5859
5860/**
5861 * Worker routine for raising an FPU stack overflow exception on a push.
5862 *
5863 * @param pIemCpu The IEM per CPU data.
5864 * @param pCtx The CPU context.
5865 */
5866static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
5867{
5868 if (pCtx->fpu.FCW & X86_FCW_IM)
5869 {
5870 /* Masked overflow. */
5871 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
5872 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5873 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5874 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5875 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5876 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5877 iemFpuRotateStackPush(pCtx);
5878 }
5879 else
5880 {
5881 /* Exception pending - don't change TOP or the register stack. */
5882 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5883 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5884 }
5885}
5886
5887
5888/**
5889 * Raises a FPU stack overflow exception on a push.
5890 *
5891 * @param pIemCpu The IEM per CPU data.
5892 */
5893DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5894{
5895 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5896 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5897 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
5898}
5899
5900
5901/**
5902 * Raises a FPU stack overflow exception on a push with a memory operand.
5903 *
5904 * @param pIemCpu The IEM per CPU data.
5905 * @param iEffSeg The effective memory operand selector register.
5906 * @param GCPtrEff The effective memory operand offset.
5907 */
5908DECL_NO_INLINE(static, void)
5909iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5910{
5911 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5912 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5913 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5914 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
5915}
5916
5917
5918static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5919{
5920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5921 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5922 if (pCtx->fpu.FTW & RT_BIT(iReg))
5923 return VINF_SUCCESS;
5924 return VERR_NOT_FOUND;
5925}
5926
5927
5928static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5929{
5930 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5931 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5932 if (pCtx->fpu.FTW & RT_BIT(iReg))
5933 {
5934 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
5935 return VINF_SUCCESS;
5936 }
5937 return VERR_NOT_FOUND;
5938}
5939
5940
5941static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5942 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5943{
5944 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5945 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
5946 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5947 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5948 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5949 {
5950 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
5951 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
5952 return VINF_SUCCESS;
5953 }
5954 return VERR_NOT_FOUND;
5955}
5956
5957
5958static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5959{
5960 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5961 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
5962 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5963 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5964 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5965 {
5966 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
5967 return VINF_SUCCESS;
5968 }
5969 return VERR_NOT_FOUND;
5970}
5971
5972
5973/**
5974 * Updates the FPU exception status after FCW is changed.
5975 *
5976 * @param pCtx The CPU context.
5977 */
5978static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
5979{
5980 uint16_t u16Fsw = pCtx->fpu.FSW;
5981 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
5982 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5983 else
5984 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5985 pCtx->fpu.FSW = u16Fsw;
5986}
5987
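/*
 * Example for the recalculation above: the exception flags in FSW (IE, DE,
 * ZE, OE, UE, PE) line up bit for bit with the mask bits in FCW (IM..PM),
 * which is why the simple AND-NOT works.
 *
 *   FSW.IE=1, FCW.IM=0 (unmasked)  ->  ES and B become set.
 *   FSW.IE=1, FCW.IM=1 (masked)    ->  ES and B are cleared (assuming no
 *                                      other unmasked exception is pending).
 */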
5988
5989/**
5990 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
5991 *
5992 * @returns The full FTW.
5993 * @param pCtx The CPU state.
5994 */
5995static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
5996{
5997 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
5998 uint16_t u16Ftw = 0;
5999 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
6000 for (unsigned iSt = 0; iSt < 8; iSt++)
6001 {
6002 unsigned const iReg = (iSt + iTop) & 7;
6003 if (!(u8Ftw & RT_BIT(iReg)))
6004 u16Ftw |= 3 << (iReg * 2); /* empty */
6005 else
6006 {
6007 uint16_t uTag;
6008 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
6009 if (pr80Reg->s.uExponent == 0x7fff)
6010 uTag = 2; /* Exponent is all 1's => Special. */
6011 else if (pr80Reg->s.uExponent == 0x0000)
6012 {
6013 if (pr80Reg->s.u64Mantissa == 0x0000)
6014 uTag = 1; /* All bits are zero => Zero. */
6015 else
6016 uTag = 2; /* Must be special. */
6017 }
6018 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6019 uTag = 0; /* Valid. */
6020 else
6021 uTag = 2; /* Must be special. */
6022
6023 u16Ftw |= uTag << (iReg * 2);
6024 }
6025 }
6026
6027 return u16Ftw;
6028}
6029
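/*
 * Tag encoding used above (two bits per physical register, register i at
 * bits [2*i+1:2*i]): 00=valid, 01=zero, 10=special (NaN, infinity, denormal,
 * unnormal), 11=empty.
 *
 * Worked example: TOP=6 and only ST(0) (physical R6) holds a normal value:
 *   all other pairs are 11, bits 13:12 are 00  ->  u16Ftw = 0xcfff.
 */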
6030
6031/**
6032 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6033 *
6034 * @returns The compressed FTW.
6035 * @param u16FullFtw The full FTW to convert.
6036 */
6037static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6038{
6039 uint8_t u8Ftw = 0;
6040 for (unsigned i = 0; i < 8; i++)
6041 {
6042 if ((u16FullFtw & 3) != 3 /*empty*/)
6043 u8Ftw |= RT_BIT(i);
6044 u16FullFtw >>= 2;
6045 }
6046
6047 return u8Ftw;
6048}
6049
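/*
 * Worked example for the compression above: each 2-bit tag collapses to one
 * bit, set when the register is not empty, i.e. the same one-bit-per-register
 * form this file keeps in pCtx->fpu.FTW (as FXSAVE does).
 *
 *   full FTW 0xfff0 (R0 and R1 valid, R2..R7 empty)  ->  compressed 0x03.
 */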
6050/** @} */
6051
6052
6053/** @name Memory access.
6054 *
6055 * @{
6056 */
6057
6058
6059/**
6060 * Updates the IEMCPU::cbWritten counter if applicable.
6061 *
6062 * @param pIemCpu The IEM per CPU data.
6063 * @param fAccess The access being accounted for.
6064 * @param cbMem The access size.
6065 */
6066DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6067{
6068 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6069 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6070 pIemCpu->cbWritten += (uint32_t)cbMem;
6071}
6072
6073
6074/**
6075 * Checks if the given segment can be written to, raising the appropriate
6076 * exception if not.
6077 *
6078 * @returns VBox strict status code.
6079 *
6080 * @param pIemCpu The IEM per CPU data.
6081 * @param pHid Pointer to the hidden register.
6082 * @param iSegReg The register number.
6083 * @param pu64BaseAddr Where to return the base address to use for the
6084 * segment. (In 64-bit code it may differ from the
6085 * base in the hidden segment.)
6086 */
6087static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6088{
6089 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6090 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6091 else
6092 {
6093 if (!pHid->Attr.n.u1Present)
6094 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6095
6096 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6097 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6098 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6099 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6100 *pu64BaseAddr = pHid->u64Base;
6101 }
6102 return VINF_SUCCESS;
6103}
6104
6105
6106/**
6107 * Checks if the given segment can be read from, raising the appropriate
6108 * exception if not.
6109 *
6110 * @returns VBox strict status code.
6111 *
6112 * @param pIemCpu The IEM per CPU data.
6113 * @param pHid Pointer to the hidden register.
6114 * @param iSegReg The register number.
6115 * @param pu64BaseAddr Where to return the base address to use for the
6116 * segment. (In 64-bit code it may differ from the
6117 * base in the hidden segment.)
6118 */
6119static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6120{
6121 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6122 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6123 else
6124 {
6125 if (!pHid->Attr.n.u1Present)
6126 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6127
6128 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6129 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6130 *pu64BaseAddr = pHid->u64Base;
6131 }
6132 return VINF_SUCCESS;
6133}
6134
6135
6136/**
6137 * Applies the segment limit, base and attributes.
6138 *
6139 * This may raise a \#GP or \#SS.
6140 *
6141 * @returns VBox strict status code.
6142 *
6143 * @param pIemCpu The IEM per CPU data.
6144 * @param fAccess The kind of access which is being performed.
6145 * @param iSegReg The index of the segment register to apply.
6146 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6147 * TSS, ++).
6148 * @param pGCPtrMem Pointer to the guest memory address to apply
6149 * segmentation to. Input and output parameter.
6150 */
6151static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
6152 size_t cbMem, PRTGCPTR pGCPtrMem)
6153{
6154 if (iSegReg == UINT8_MAX)
6155 return VINF_SUCCESS;
6156
6157 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6158 switch (pIemCpu->enmCpuMode)
6159 {
6160 case IEMMODE_16BIT:
6161 case IEMMODE_32BIT:
6162 {
6163 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6164 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6165
6166 Assert(pSel->Attr.n.u1Present);
6167 Assert(pSel->Attr.n.u1DescType);
6168 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6169 {
6170 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6171 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6172 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6173
6174 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6175 {
6176 /** @todo CPL check. */
6177 }
6178
6179 /*
6180 * There are two kinds of data selectors, normal and expand down.
6181 */
6182 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6183 {
6184 if ( GCPtrFirst32 > pSel->u32Limit
6185 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6186 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6187 }
6188 else
6189 {
6190 /*
6191 * The upper boundary is defined by the B bit, not the G bit!
6192 */
6193 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6194 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6195 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6196 }
6197 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6198 }
6199 else
6200 {
6201
6202 /*
6203 * Code selectors can usually be used to read through; writing is
6204 * only permitted in real and V8086 mode.
6205 */
6206 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6207 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6208 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6209 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6210 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6211
6212 if ( GCPtrFirst32 > pSel->u32Limit
6213 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6214 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6215
6216 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6217 {
6218 /** @todo CPL check. */
6219 }
6220
6221 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6222 }
6223 return VINF_SUCCESS;
6224 }
6225
6226 case IEMMODE_64BIT:
6227 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6228 *pGCPtrMem += pSel->u64Base;
6229 return VINF_SUCCESS;
6230
6231 default:
6232 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
6233 }
6234}
6235
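/*
 * Worked example for the expand-down check above (illustration only): for an
 * expand-down data segment the valid offsets lie *above* the limit.
 *
 *   limit=0x0fff, D/B=1: offsets 0x00001000..0xffffffff are valid, so an
 *   access is rejected when its first byte is < limit+1 or its last byte is
 *   > 0xffffffff; with D/B=0 the upper bound is 0xffff instead.
 */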
6236
6237/**
6238 * Translates a virtual address to a physical address and checks if we
6239 * can access the page as specified.
6240 *
6241 * @param pIemCpu The IEM per CPU data.
6242 * @param GCPtrMem The virtual address.
6243 * @param fAccess The intended access.
6244 * @param pGCPhysMem Where to return the physical address.
6245 */
6246static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
6247 PRTGCPHYS pGCPhysMem)
6248{
6249 /** @todo Need a different PGM interface here. We're currently using
6250 * generic / REM interfaces. this won't cut it for R0 & RC. */
6251 RTGCPHYS GCPhys;
6252 uint64_t fFlags;
6253 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6254 if (RT_FAILURE(rc))
6255 {
6256 /** @todo Check unassigned memory in unpaged mode. */
6257 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6258 *pGCPhysMem = NIL_RTGCPHYS;
6259 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6260 }
6261
6262 /* If the page is writable, user accessible and does not have the no-exec
6263 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
6264 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6265 {
6266 /* Write to read only memory? */
6267 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6268 && !(fFlags & X86_PTE_RW)
6269 && ( pIemCpu->uCpl != 0
6270 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6271 {
6272 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6273 *pGCPhysMem = NIL_RTGCPHYS;
6274 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6275 }
6276
6277 /* Kernel memory accessed by userland? */
6278 if ( !(fFlags & X86_PTE_US)
6279 && pIemCpu->uCpl == 3
6280 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6281 {
6282 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6283 *pGCPhysMem = NIL_RTGCPHYS;
6284 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6285 }
6286
6287 /* Executing non-executable memory? */
6288 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6289 && (fFlags & X86_PTE_PAE_NX)
6290 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6291 {
6292 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6293 *pGCPhysMem = NIL_RTGCPHYS;
6294 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6295 VERR_ACCESS_DENIED);
6296 }
6297 }
6298
6299 /*
6300 * Set the dirty / access flags.
6301 * ASSUMES this is set when the address is translated rather than on commit...
6302 */
6303 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6304 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6305 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6306 {
6307 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6308 AssertRC(rc2);
6309 }
6310
6311 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6312 *pGCPhysMem = GCPhys;
6313 return VINF_SUCCESS;
6314}
6315
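/*
 * Summary examples for the access checks above (illustration only):
 *
 *   - CPL=3 write, PTE.RW=0                            -> #PF regardless of CR0.WP.
 *   - CPL=0 write, PTE.RW=0                            -> #PF only when CR0.WP=1.
 *   - CPL=3 access, PTE.US=0, not IEM_ACCESS_WHAT_SYS  -> #PF.
 *   - instruction fetch, PTE.NX=1, EFER.NXE=1          -> #PF.
 */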
6316
6317
6318/**
6319 * Maps a physical page.
6320 *
6321 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6322 * @param pIemCpu The IEM per CPU data.
6323 * @param GCPhysMem The physical address.
6324 * @param fAccess The intended access.
6325 * @param ppvMem Where to return the mapping address.
6326 * @param pLock The PGM lock.
6327 */
6328static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6329{
6330#ifdef IEM_VERIFICATION_MODE_FULL
6331 /* Force the alternative path so we can ignore writes. */
6332 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6333 {
6334 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6335 {
6336 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6337 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6338 if (RT_FAILURE(rc2))
6339 pIemCpu->fProblematicMemory = true;
6340 }
6341 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6342 }
6343#endif
6344#ifdef IEM_LOG_MEMORY_WRITES
6345 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6346 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6347#endif
6348#ifdef IEM_VERIFICATION_MODE_MINIMAL
6349 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6350#endif
6351
6352 /** @todo This API may require some improving later. A private deal with PGM
6353 * regarding locking and unlocking needs to be struck. A couple of TLBs
6354 * living in PGM, but with publicly accessible inlined access methods
6355 * could perhaps be an even better solution. */
6356 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6357 GCPhysMem,
6358 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6359 pIemCpu->fBypassHandlers,
6360 ppvMem,
6361 pLock);
6362 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6363 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6364
6365#ifdef IEM_VERIFICATION_MODE_FULL
6366 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6367 pIemCpu->fProblematicMemory = true;
6368#endif
6369 return rc;
6370}
6371
6372
6373/**
6374 * Unmap a page previously mapped by iemMemPageMap.
6375 *
6376 * @param pIemCpu The IEM per CPU data.
6377 * @param GCPhysMem The physical address.
6378 * @param fAccess The intended access.
6379 * @param pvMem What iemMemPageMap returned.
6380 * @param pLock The PGM lock.
6381 */
6382DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6383{
6384 NOREF(pIemCpu);
6385 NOREF(GCPhysMem);
6386 NOREF(fAccess);
6387 NOREF(pvMem);
6388 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6389}
6390
6391
6392/**
6393 * Looks up a memory mapping entry.
6394 *
6395 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6396 * @param pIemCpu The IEM per CPU data.
6397 * @param pvMem The memory address.
6398 * @param fAccess The access flags (type and scope) to match.
6399 */
6400DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6401{
6402 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6403 if ( pIemCpu->aMemMappings[0].pv == pvMem
6404 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6405 return 0;
6406 if ( pIemCpu->aMemMappings[1].pv == pvMem
6407 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6408 return 1;
6409 if ( pIemCpu->aMemMappings[2].pv == pvMem
6410 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6411 return 2;
6412 return VERR_NOT_FOUND;
6413}
6414
6415
6416/**
6417 * Finds a free memmap entry when using iNextMapping doesn't work.
6418 *
6419 * @returns Memory mapping index, 1024 on failure.
6420 * @param pIemCpu The IEM per CPU data.
6421 */
6422static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6423{
6424 /*
6425 * The easy case.
6426 */
6427 if (pIemCpu->cActiveMappings == 0)
6428 {
6429 pIemCpu->iNextMapping = 1;
6430 return 0;
6431 }
6432
6433 /* There should be enough mappings for all instructions. */
6434 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6435
6436 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6437 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6438 return i;
6439
6440 AssertFailedReturn(1024);
6441}
6442
6443
6444/**
6445 * Commits a bounce buffer that needs writing back and unmaps it.
6446 *
6447 * @returns Strict VBox status code.
6448 * @param pIemCpu The IEM per CPU data.
6449 * @param iMemMap The index of the buffer to commit.
6450 */
6451static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6452{
6453 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6454 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6455
6456 /*
6457 * Do the writing.
6458 */
6459 int rc;
6460#ifndef IEM_VERIFICATION_MODE_MINIMAL
6461 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6462 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6463 {
6464 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6465 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6466 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6467 if (!pIemCpu->fBypassHandlers)
6468 {
6469 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
6470 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6471 pbBuf,
6472 cbFirst);
6473 if (cbSecond && rc == VINF_SUCCESS)
6474 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
6475 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6476 pbBuf + cbFirst,
6477 cbSecond);
6478 }
6479 else
6480 {
6481 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
6482 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6483 pbBuf,
6484 cbFirst);
6485 if (cbSecond && rc == VINF_SUCCESS)
6486 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
6487 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6488 pbBuf + cbFirst,
6489 cbSecond);
6490 }
6491 if (rc != VINF_SUCCESS)
6492 {
6493 /** @todo status code handling */
6494 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6495 pIemCpu->fBypassHandlers ? "PGMPhysSimpleWriteGCPhys" : "PGMPhysWrite",
6496 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6497 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6498 }
6499 }
6500 else
6501#endif
6502 rc = VINF_SUCCESS;
6503
6504#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6505 /*
6506 * Record the write(s).
6507 */
6508 if (!pIemCpu->fNoRem)
6509 {
6510 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6511 if (pEvtRec)
6512 {
6513 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6514 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6515 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6516 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6517 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6518 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6519 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6520 }
6521 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6522 {
6523 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6524 if (pEvtRec)
6525 {
6526 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6527 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6528 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6529 memcpy(pEvtRec->u.RamWrite.ab,
6530 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6531 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6532 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6533 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6534 }
6535 }
6536 }
6537#endif
6538#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6539 if (rc == VINF_SUCCESS)
6540 {
6541 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6542 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6543 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6544 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6545 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6546 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6547
6548 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6549 g_cbIemWrote = cbWrote;
6550 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6551 }
6552#endif
6553
6554 /*
6555 * Free the mapping entry.
6556 */
6557 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6558 Assert(pIemCpu->cActiveMappings != 0);
6559 pIemCpu->cActiveMappings--;
6560 return rc;
6561}
6562
6563
6564/**
6565 * iemMemMap worker that deals with a request crossing pages.
6566 */
6567static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
6568 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6569{
6570 /*
6571 * Do the address translations.
6572 */
6573 RTGCPHYS GCPhysFirst;
6574 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6575 if (rcStrict != VINF_SUCCESS)
6576 return rcStrict;
6577
6578/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6579 * last byte. */
6580 RTGCPHYS GCPhysSecond;
6581 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6582 if (rcStrict != VINF_SUCCESS)
6583 return rcStrict;
6584 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6585
6586#ifdef IEM_VERIFICATION_MODE_FULL
6587 /*
6588 * Detect problematic memory when verifying so we can select
6589 * the right execution engine. (TLB: Redo this.)
6590 */
6591 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6592 {
6593 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysFirst,
6594 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6595 if (RT_SUCCESS(rc2))
6596 rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysSecond,
6597 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6598 if (RT_FAILURE(rc2))
6599 pIemCpu->fProblematicMemory = true;
6600 }
6601#endif
6602
6603
6604 /*
6605 * Read in the current memory content if it's a read, execute or partial
6606 * write access.
6607 */
6608 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6609 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6610 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6611
6612 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6613 {
6614 int rc;
6615 if (!pIemCpu->fBypassHandlers)
6616 {
6617 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
6618 if (rc != VINF_SUCCESS)
6619 {
6620 /** @todo status code handling */
6621 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6622 return rc;
6623 }
6624 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
6625 if (rc != VINF_SUCCESS)
6626 {
6627 /** @todo status code handling */
6628 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6629 return rc;
6630 }
6631 }
6632 else
6633 {
6634 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
6635 if (rc != VINF_SUCCESS)
6636 {
6637 /** @todo status code handling */
6638 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6639 return rc;
6640 }
6641 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6642 if (rc != VINF_SUCCESS)
6643 {
6644 /** @todo status code handling */
6645 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6646 return rc;
6647 }
6648 }
6649
6650#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6651 if ( !pIemCpu->fNoRem
6652 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6653 {
6654 /*
6655 * Record the reads.
6656 */
6657 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6658 if (pEvtRec)
6659 {
6660 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6661 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6662 pEvtRec->u.RamRead.cb = cbFirstPage;
6663 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6664 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6665 }
6666 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6667 if (pEvtRec)
6668 {
6669 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6670 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6671 pEvtRec->u.RamRead.cb = cbSecondPage;
6672 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6673 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6674 }
6675 }
6676#endif
6677 }
6678#ifdef VBOX_STRICT
6679 else
6680 memset(pbBuf, 0xcc, cbMem);
6681#endif
6682#ifdef VBOX_STRICT
6683 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6684 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6685#endif
6686
6687 /*
6688 * Commit the bounce buffer entry.
6689 */
6690 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6691 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6692 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6693 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6694 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6695 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6696 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6697 pIemCpu->iNextMapping = iMemMap + 1;
6698 pIemCpu->cActiveMappings++;
6699
6700 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6701 *ppvMem = pbBuf;
6702 return VINF_SUCCESS;
6703}
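
/*
 * Illustrative example of the page split above, assuming PAGE_SIZE is 4096:
 * for cbMem = 4 at a guest address with page offset 0xffe, cbFirstPage
 * becomes 0x1000 - 0xffe = 2 and cbSecondPage becomes 4 - 2 = 2, i.e. two
 * bytes land on each page and both halves are later committed from the one
 * bounce buffer.
 */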
6704
6705
6706/**
6707 * iemMemMap worker that deals with iemMemPageMap failures.
6708 */
6709static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6710 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6711{
6712 /*
6713 * Filter out conditions we can handle and the ones which shouldn't happen.
6714 */
6715 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6716 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6717 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6718 {
6719 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
6720 return rcMap;
6721 }
6722 pIemCpu->cPotentialExits++;
6723
6724 /*
6725 * Read in the current memory content if it's a read, execute or partial
6726 * write access.
6727 */
6728 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6729 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6730 {
6731 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6732 memset(pbBuf, 0xff, cbMem);
6733 else
6734 {
6735 int rc;
6736 if (!pIemCpu->fBypassHandlers)
6737 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
6738 else
6739 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6740 if (rc != VINF_SUCCESS)
6741 {
6742 /** @todo status code handling */
6743 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6744 pIemCpu->fBypassHandlers ? "PGMPhysSimpleReadGCPhys" : "PGMPhysRead", GCPhysFirst, rc));
6745 return rc;
6746 }
6747 }
6748
6749#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6750 if ( !pIemCpu->fNoRem
6751 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6752 {
6753 /*
6754 * Record the read.
6755 */
6756 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6757 if (pEvtRec)
6758 {
6759 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6760 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6761 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6762 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6763 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6764 }
6765 }
6766#endif
6767 }
6768#ifdef VBOX_STRICT
6769 else
6770 memset(pbBuf, 0xcc, cbMem);
6771#endif
6772#ifdef VBOX_STRICT
6773 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6774 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6775#endif
6776
6777 /*
6778 * Commit the bounce buffer entry.
6779 */
6780 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6781 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6782 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6783 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6784 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6785 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6786 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6787 pIemCpu->iNextMapping = iMemMap + 1;
6788 pIemCpu->cActiveMappings++;
6789
6790 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6791 *ppvMem = pbBuf;
6792 return VINF_SUCCESS;
6793}
6794
6795
6796
6797/**
6798 * Maps the specified guest memory for the given kind of access.
6799 *
6800 * This may be using bounce buffering of the memory if it's crossing a page
6801 * boundary or if there is an access handler installed for any of it. Because
6802 * of lock prefix guarantees, we're in for some extra clutter when this
6803 * happens.
6804 *
6805 * This may raise a \#GP, \#SS, \#PF or \#AC.
6806 *
6807 * @returns VBox strict status code.
6808 *
6809 * @param pIemCpu The IEM per CPU data.
6810 * @param ppvMem Where to return the pointer to the mapped
6811 * memory.
6812 * @param cbMem The number of bytes to map. This is usually 1,
6813 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6814 * string operations it can be up to a page.
6815 * @param iSegReg The index of the segment register to use for
6816 * this access. The base and limits are checked.
6817 * Use UINT8_MAX to indicate that no segmentation
6818 * is required (for IDT, GDT and LDT accesses).
6819 * @param GCPtrMem The address of the guest memory.
6820 * @param fAccess How the memory is being accessed. The
6821 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6822 * how to map the memory, while the
6823 * IEM_ACCESS_WHAT_XXX bit is used when raising
6824 * exceptions.
6825 */
6826static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6827{
6828 /*
6829 * Check the input and figure out which mapping entry to use.
6830 */
6831 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6832 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6833
6834 unsigned iMemMap = pIemCpu->iNextMapping;
6835 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6836 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6837 {
6838 iMemMap = iemMemMapFindFree(pIemCpu);
6839 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
6840 }
6841
6842 /*
6843 * Map the memory, checking that we can actually access it. If something
6844 * slightly complicated happens, fall back on bounce buffering.
6845 */
6846 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6847 if (rcStrict != VINF_SUCCESS)
6848 return rcStrict;
6849
6850 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6851 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6852
6853 RTGCPHYS GCPhysFirst;
6854 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6855 if (rcStrict != VINF_SUCCESS)
6856 return rcStrict;
6857
6858 void *pvMem;
6859 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6860 if (rcStrict != VINF_SUCCESS)
6861 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6862
6863 /*
6864 * Fill in the mapping table entry.
6865 */
6866 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
6867 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
6868 pIemCpu->iNextMapping = iMemMap + 1;
6869 pIemCpu->cActiveMappings++;
6870
6871 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6872 *ppvMem = pvMem;
6873 return VINF_SUCCESS;
6874}
6875
6876
6877/**
6878 * Commits the guest memory if bounce buffered and unmaps it.
6879 *
6880 * @returns Strict VBox status code.
6881 * @param pIemCpu The IEM per CPU data.
6882 * @param pvMem The mapping.
6883 * @param fAccess The kind of access.
6884 */
6885static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6886{
6887 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
6888 AssertReturn(iMemMap >= 0, iMemMap);
6889
6890 /* If it's bounce buffered, we may need to write back the buffer. */
6891 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6892 {
6893 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6894 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
6895 }
6896 /* Otherwise unlock it. */
6897 else
6898 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6899
6900 /* Free the entry. */
6901 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6902 Assert(pIemCpu->cActiveMappings != 0);
6903 pIemCpu->cActiveMappings--;
6904 return VINF_SUCCESS;
6905}
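
/*
 * A minimal usage sketch (not part of the original code) of the
 * iemMemMap / iemMemCommitAndUnmap pairing for a read-modify-write data
 * access.  The helper name and the IEM_ACCESS_DATA_RW flag combination are
 * assumptions made purely for illustration.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleOrU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fOrMask)
{
    uint32_t *pu32Dst;
    /* Map the dword; internally this may fall back on a bounce buffer. */
    VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
                                      IEM_ACCESS_DATA_RW /* assumed read+write data access combination */);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst |= fOrMask;            /* modify the mapped guest memory */
        /* Commit writes back any bounce buffer and frees the mapping entry. */
        rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif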
6906
6907
6908/**
6909 * Rolls back mappings, releasing page locks and such.
6910 *
6911 * The caller shall only call this after checking cActiveMappings.
6912 *
6914 * @param pIemCpu The IEM per CPU data.
6915 */
6916static void iemMemRollback(PIEMCPU pIemCpu)
6917{
6918 Assert(pIemCpu->cActiveMappings > 0);
6919
6920 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
6921 while (iMemMap-- > 0)
6922 {
6923 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
6924 if (fAccess != IEM_ACCESS_INVALID)
6925 {
6926 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6927 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
6928 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6929 Assert(pIemCpu->cActiveMappings > 0);
6930 pIemCpu->cActiveMappings--;
6931 }
6932 }
6933}
6934
6935
6936/**
6937 * Fetches a data byte.
6938 *
6939 * @returns Strict VBox status code.
6940 * @param pIemCpu The IEM per CPU data.
6941 * @param pu8Dst Where to return the byte.
6942 * @param iSegReg The index of the segment register to use for
6943 * this access. The base and limits are checked.
6944 * @param GCPtrMem The address of the guest memory.
6945 */
6946static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6947{
6948 /* The lazy approach for now... */
6949 uint8_t const *pu8Src;
6950 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6951 if (rc == VINF_SUCCESS)
6952 {
6953 *pu8Dst = *pu8Src;
6954 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6955 }
6956 return rc;
6957}
6958
6959
6960/**
6961 * Fetches a data word.
6962 *
6963 * @returns Strict VBox status code.
6964 * @param pIemCpu The IEM per CPU data.
6965 * @param pu16Dst Where to return the word.
6966 * @param iSegReg The index of the segment register to use for
6967 * this access. The base and limits are checked.
6968 * @param GCPtrMem The address of the guest memory.
6969 */
6970static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6971{
6972 /* The lazy approach for now... */
6973 uint16_t const *pu16Src;
6974 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6975 if (rc == VINF_SUCCESS)
6976 {
6977 *pu16Dst = *pu16Src;
6978 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6979 }
6980 return rc;
6981}
6982
6983
6984/**
6985 * Fetches a data dword.
6986 *
6987 * @returns Strict VBox status code.
6988 * @param pIemCpu The IEM per CPU data.
6989 * @param pu32Dst Where to return the dword.
6990 * @param iSegReg The index of the segment register to use for
6991 * this access. The base and limits are checked.
6992 * @param GCPtrMem The address of the guest memory.
6993 */
6994static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6995{
6996 /* The lazy approach for now... */
6997 uint32_t const *pu32Src;
6998 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6999 if (rc == VINF_SUCCESS)
7000 {
7001 *pu32Dst = *pu32Src;
7002 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7003 }
7004 return rc;
7005}
7006
7007
7008#ifdef SOME_UNUSED_FUNCTION
7009/**
7010 * Fetches a data dword and sign extends it to a qword.
7011 *
7012 * @returns Strict VBox status code.
7013 * @param pIemCpu The IEM per CPU data.
7014 * @param pu64Dst Where to return the sign extended value.
7015 * @param iSegReg The index of the segment register to use for
7016 * this access. The base and limits are checked.
7017 * @param GCPtrMem The address of the guest memory.
7018 */
7019static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7020{
7021 /* The lazy approach for now... */
7022 int32_t const *pi32Src;
7023 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7024 if (rc == VINF_SUCCESS)
7025 {
7026 *pu64Dst = *pi32Src;
7027 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7028 }
7029#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7030 else
7031 *pu64Dst = 0;
7032#endif
7033 return rc;
7034}
7035#endif
7036
7037
7038/**
7039 * Fetches a data qword.
7040 *
7041 * @returns Strict VBox status code.
7042 * @param pIemCpu The IEM per CPU data.
7043 * @param pu64Dst Where to return the qword.
7044 * @param iSegReg The index of the segment register to use for
7045 * this access. The base and limits are checked.
7046 * @param GCPtrMem The address of the guest memory.
7047 */
7048static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7049{
7050 /* The lazy approach for now... */
7051 uint64_t const *pu64Src;
7052 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7053 if (rc == VINF_SUCCESS)
7054 {
7055 *pu64Dst = *pu64Src;
7056 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7057 }
7058 return rc;
7059}
7060
7061
7062/**
7063 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7064 *
7065 * @returns Strict VBox status code.
7066 * @param pIemCpu The IEM per CPU data.
7067 * @param pu64Dst Where to return the qword.
7068 * @param iSegReg The index of the segment register to use for
7069 * this access. The base and limits are checked.
7070 * @param GCPtrMem The address of the guest memory.
7071 */
7072static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7073{
7074 /* The lazy approach for now... */
7075 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7076 if (RT_UNLIKELY(GCPtrMem & 15))
7077 return iemRaiseGeneralProtectionFault0(pIemCpu);
7078
7079 uint64_t const *pu64Src;
7080 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7081 if (rc == VINF_SUCCESS)
7082 {
7083 *pu64Dst = *pu64Src;
7084 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7085 }
7086 return rc;
7087}
7088
7089
7090/**
7091 * Fetches a data tword.
7092 *
7093 * @returns Strict VBox status code.
7094 * @param pIemCpu The IEM per CPU data.
7095 * @param pr80Dst Where to return the tword.
7096 * @param iSegReg The index of the segment register to use for
7097 * this access. The base and limits are checked.
7098 * @param GCPtrMem The address of the guest memory.
7099 */
7100static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7101{
7102 /* The lazy approach for now... */
7103 PCRTFLOAT80U pr80Src;
7104 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7105 if (rc == VINF_SUCCESS)
7106 {
7107 *pr80Dst = *pr80Src;
7108 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7109 }
7110 return rc;
7111}
7112
7113
7114/**
7115 * Fetches a data dqword (double qword), generally SSE related.
7116 *
7117 * @returns Strict VBox status code.
7118 * @param pIemCpu The IEM per CPU data.
7119 * @param pu128Dst Where to return the dqword.
7120 * @param iSegReg The index of the segment register to use for
7121 * this access. The base and limits are checked.
7122 * @param GCPtrMem The address of the guest memory.
7123 */
7124static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7125{
7126 /* The lazy approach for now... */
7127 uint128_t const *pu128Src;
7128 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7129 if (rc == VINF_SUCCESS)
7130 {
7131 *pu128Dst = *pu128Src;
7132 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7133 }
7134 return rc;
7135}
7136
7137
7138/**
7139 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7140 * related.
7141 *
7142 * Raises \#GP(0) if not aligned.
7143 *
7144 * @returns Strict VBox status code.
7145 * @param pIemCpu The IEM per CPU data.
7146 * @param pu128Dst Where to return the dqword.
7147 * @param iSegReg The index of the segment register to use for
7148 * this access. The base and limits are checked.
7149 * @param GCPtrMem The address of the guest memory.
7150 */
7151static VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7152{
7153 /* The lazy approach for now... */
7154 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7155 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7156 return iemRaiseGeneralProtectionFault0(pIemCpu);
7157
7158 uint128_t const *pu128Src;
7159 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7160 if (rc == VINF_SUCCESS)
7161 {
7162 *pu128Dst = *pu128Src;
7163 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7164 }
7165 return rc;
7166}
7167
7168
7169
7170
7171/**
7172 * Fetches a descriptor register (lgdt, lidt).
7173 *
7174 * @returns Strict VBox status code.
7175 * @param pIemCpu The IEM per CPU data.
7176 * @param pcbLimit Where to return the limit.
7177 * @param pGCPtrBase Where to return the base.
7178 * @param iSegReg The index of the segment register to use for
7179 * this access. The base and limits are checked.
7180 * @param GCPtrMem The address of the guest memory.
7181 * @param enmOpSize The effective operand size.
7182 */
7183static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
7184 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7185{
7186 uint8_t const *pu8Src;
7187 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7188 (void **)&pu8Src,
7189 enmOpSize == IEMMODE_64BIT
7190 ? 2 + 8
7191 : enmOpSize == IEMMODE_32BIT
7192 ? 2 + 4
7193 : 2 + 3,
7194 iSegReg,
7195 GCPtrMem,
7196 IEM_ACCESS_DATA_R);
7197 if (rcStrict == VINF_SUCCESS)
7198 {
7199 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7200 switch (enmOpSize)
7201 {
7202 case IEMMODE_16BIT:
7203 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7204 break;
7205 case IEMMODE_32BIT:
7206 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7207 break;
7208 case IEMMODE_64BIT:
7209 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7210 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7211 break;
7212
7213 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7214 }
7215 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7216 }
7217 return rcStrict;
7218}
7219
7220
7221
7222/**
7223 * Stores a data byte.
7224 *
7225 * @returns Strict VBox status code.
7226 * @param pIemCpu The IEM per CPU data.
7227 * @param iSegReg The index of the segment register to use for
7228 * this access. The base and limits are checked.
7229 * @param GCPtrMem The address of the guest memory.
7230 * @param u8Value The value to store.
7231 */
7232static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7233{
7234 /* The lazy approach for now... */
7235 uint8_t *pu8Dst;
7236 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7237 if (rc == VINF_SUCCESS)
7238 {
7239 *pu8Dst = u8Value;
7240 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7241 }
7242 return rc;
7243}
7244
7245
7246/**
7247 * Stores a data word.
7248 *
7249 * @returns Strict VBox status code.
7250 * @param pIemCpu The IEM per CPU data.
7251 * @param iSegReg The index of the segment register to use for
7252 * this access. The base and limits are checked.
7253 * @param GCPtrMem The address of the guest memory.
7254 * @param u16Value The value to store.
7255 */
7256static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7257{
7258 /* The lazy approach for now... */
7259 uint16_t *pu16Dst;
7260 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7261 if (rc == VINF_SUCCESS)
7262 {
7263 *pu16Dst = u16Value;
7264 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7265 }
7266 return rc;
7267}
7268
7269
7270/**
7271 * Stores a data dword.
7272 *
7273 * @returns Strict VBox status code.
7274 * @param pIemCpu The IEM per CPU data.
7275 * @param iSegReg The index of the segment register to use for
7276 * this access. The base and limits are checked.
7277 * @param GCPtrMem The address of the guest memory.
7278 * @param u32Value The value to store.
7279 */
7280static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7281{
7282 /* The lazy approach for now... */
7283 uint32_t *pu32Dst;
7284 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7285 if (rc == VINF_SUCCESS)
7286 {
7287 *pu32Dst = u32Value;
7288 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7289 }
7290 return rc;
7291}
7292
7293
7294/**
7295 * Stores a data qword.
7296 *
7297 * @returns Strict VBox status code.
7298 * @param pIemCpu The IEM per CPU data.
7299 * @param iSegReg The index of the segment register to use for
7300 * this access. The base and limits are checked.
7301 * @param GCPtrMem The address of the guest memory.
7302 * @param u64Value The value to store.
7303 */
7304static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7305{
7306 /* The lazy approach for now... */
7307 uint64_t *pu64Dst;
7308 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7309 if (rc == VINF_SUCCESS)
7310 {
7311 *pu64Dst = u64Value;
7312 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7313 }
7314 return rc;
7315}
7316
7317
7318/**
7319 * Stores a data dqword.
7320 *
7321 * @returns Strict VBox status code.
7322 * @param pIemCpu The IEM per CPU data.
7323 * @param iSegReg The index of the segment register to use for
7324 * this access. The base and limits are checked.
7325 * @param GCPtrMem The address of the guest memory.
7326 * @param u128Value The value to store.
7327 */
7328static VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7329{
7330 /* The lazy approach for now... */
7331 uint128_t *pu128Dst;
7332 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7333 if (rc == VINF_SUCCESS)
7334 {
7335 *pu128Dst = u128Value;
7336 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7337 }
7338 return rc;
7339}
7340
7341
7342/**
7343 * Stores a data dqword, SSE aligned.
7344 *
7345 * @returns Strict VBox status code.
7346 * @param pIemCpu The IEM per CPU data.
7347 * @param iSegReg The index of the segment register to use for
7348 * this access. The base and limits are checked.
7349 * @param GCPtrMem The address of the guest memory.
7350 * @param u128Value The value to store.
7351 */
7352static VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7353{
7354 /* The lazy approach for now... */
7355 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7356 return iemRaiseGeneralProtectionFault0(pIemCpu);
7357
7358 uint128_t *pu128Dst;
7359 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7360 if (rc == VINF_SUCCESS)
7361 {
7362 *pu128Dst = u128Value;
7363 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7364 }
7365 return rc;
7366}
7367
7368
7369/**
7370 * Stores a descriptor register (sgdt, sidt).
7371 *
7372 * @returns Strict VBox status code.
7373 * @param pIemCpu The IEM per CPU data.
7374 * @param cbLimit The limit.
7375 * @param GCPtrBase The base address.
7376 * @param iSegReg The index of the segment register to use for
7377 * this access. The base and limits are checked.
7378 * @param GCPtrMem The address of the guest memory.
7379 * @param enmOpSize The effective operand size.
7380 */
7381static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
7382 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7383{
7384 uint8_t *pu8Src;
7385 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7386 (void **)&pu8Src,
7387 enmOpSize == IEMMODE_64BIT
7388 ? 2 + 8
7389 : enmOpSize == IEMMODE_32BIT
7390 ? 2 + 4
7391 : 2 + 3,
7392 iSegReg,
7393 GCPtrMem,
7394 IEM_ACCESS_DATA_W);
7395 if (rcStrict == VINF_SUCCESS)
7396 {
7397 pu8Src[0] = RT_BYTE1(cbLimit);
7398 pu8Src[1] = RT_BYTE2(cbLimit);
7399 pu8Src[2] = RT_BYTE1(GCPtrBase);
7400 pu8Src[3] = RT_BYTE2(GCPtrBase);
7401 pu8Src[4] = RT_BYTE3(GCPtrBase);
7402 if (enmOpSize == IEMMODE_16BIT)
7403 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7404 else
7405 {
7406 pu8Src[5] = RT_BYTE4(GCPtrBase);
7407 if (enmOpSize == IEMMODE_64BIT)
7408 {
7409 pu8Src[6] = RT_BYTE5(GCPtrBase);
7410 pu8Src[7] = RT_BYTE6(GCPtrBase);
7411 pu8Src[8] = RT_BYTE7(GCPtrBase);
7412 pu8Src[9] = RT_BYTE8(GCPtrBase);
7413 }
7414 }
7415 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7416 }
7417 return rcStrict;
7418}
7419
7420
7421/**
7422 * Pushes a word onto the stack.
7423 *
7424 * @returns Strict VBox status code.
7425 * @param pIemCpu The IEM per CPU data.
7426 * @param u16Value The value to push.
7427 */
7428static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7429{
7430 /* Decrement the stack pointer. */
7431 uint64_t uNewRsp;
7432 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7433 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7434
7435 /* Write the word the lazy way. */
7436 uint16_t *pu16Dst;
7437 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7438 if (rc == VINF_SUCCESS)
7439 {
7440 *pu16Dst = u16Value;
7441 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7442 }
7443
7444 /* Commit the new RSP value unless an access handler made trouble. */
7445 if (rc == VINF_SUCCESS)
7446 pCtx->rsp = uNewRsp;
7447
7448 return rc;
7449}
7450
7451
7452/**
7453 * Pushes a dword onto the stack.
7454 *
7455 * @returns Strict VBox status code.
7456 * @param pIemCpu The IEM per CPU data.
7457 * @param u32Value The value to push.
7458 */
7459static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7460{
7461 /* Decrement the stack pointer. */
7462 uint64_t uNewRsp;
7463 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7464 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7465
7466 /* Write the dword the lazy way. */
7467 uint32_t *pu32Dst;
7468 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7469 if (rc == VINF_SUCCESS)
7470 {
7471 *pu32Dst = u32Value;
7472 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7473 }
7474
7475 /* Commit the new RSP value unless an access handler made trouble. */
7476 if (rc == VINF_SUCCESS)
7477 pCtx->rsp = uNewRsp;
7478
7479 return rc;
7480}
7481
7482
7483/**
7484 * Pushes a dword segment register value onto the stack.
7485 *
7486 * @returns Strict VBox status code.
7487 * @param pIemCpu The IEM per CPU data.
7488 * @param u32Value The value to push.
7489 */
7490static VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7491{
7492 /* Decrement the stack pointer. */
7493 uint64_t uNewRsp;
7494 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7495 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7496
7497 VBOXSTRICTRC rc;
7498 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7499 {
7500 /* The recompiler writes a full dword. */
7501 uint32_t *pu32Dst;
7502 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7503 if (rc == VINF_SUCCESS)
7504 {
7505 *pu32Dst = u32Value;
7506 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7507 }
7508 }
7509 else
7510 {
7511 /* The Intel docs talk about zero extending the selector register
7512 value. My actual Intel CPU here might be zero extending the value
7513 but it still only writes the lower word... */
7514 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7515 * happens when crossing a page boundary: is the high word
7516 * checked for write accessibility or not? Probably it is. What about
7517 * segment limits? */
7518 uint16_t *pu16Dst;
7519 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7520 if (rc == VINF_SUCCESS)
7521 {
7522 *pu16Dst = (uint16_t)u32Value;
7523 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7524 }
7525 }
7526
7527 /* Commit the new RSP value unless an access handler made trouble. */
7528 if (rc == VINF_SUCCESS)
7529 pCtx->rsp = uNewRsp;
7530
7531 return rc;
7532}
7533
7534
7535/**
7536 * Pushes a qword onto the stack.
7537 *
7538 * @returns Strict VBox status code.
7539 * @param pIemCpu The IEM per CPU data.
7540 * @param u64Value The value to push.
7541 */
7542static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7543{
7544 /* Decrement the stack pointer. */
7545 uint64_t uNewRsp;
7546 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7547 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7548
7549 /* Write the qword the lazy way. */
7550 uint64_t *pu64Dst;
7551 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7552 if (rc == VINF_SUCCESS)
7553 {
7554 *pu64Dst = u64Value;
7555 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7556 }
7557
7558 /* Commit the new RSP value unless an access handler made trouble. */
7559 if (rc == VINF_SUCCESS)
7560 pCtx->rsp = uNewRsp;
7561
7562 return rc;
7563}
7564
7565
7566/**
7567 * Pops a word from the stack.
7568 *
7569 * @returns Strict VBox status code.
7570 * @param pIemCpu The IEM per CPU data.
7571 * @param pu16Value Where to store the popped value.
7572 */
7573static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7574{
7575 /* Increment the stack pointer. */
7576 uint64_t uNewRsp;
7577 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7578 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7579
7580 /* Read the word the lazy way. */
7581 uint16_t const *pu16Src;
7582 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7583 if (rc == VINF_SUCCESS)
7584 {
7585 *pu16Value = *pu16Src;
7586 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7587
7588 /* Commit the new RSP value. */
7589 if (rc == VINF_SUCCESS)
7590 pCtx->rsp = uNewRsp;
7591 }
7592
7593 return rc;
7594}
7595
7596
7597/**
7598 * Pops a dword from the stack.
7599 *
7600 * @returns Strict VBox status code.
7601 * @param pIemCpu The IEM per CPU data.
7602 * @param pu32Value Where to store the popped value.
7603 */
7604static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7605{
7606 /* Increment the stack pointer. */
7607 uint64_t uNewRsp;
7608 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7609 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7610
7611 /* Read the dword the lazy way. */
7612 uint32_t const *pu32Src;
7613 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7614 if (rc == VINF_SUCCESS)
7615 {
7616 *pu32Value = *pu32Src;
7617 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7618
7619 /* Commit the new RSP value. */
7620 if (rc == VINF_SUCCESS)
7621 pCtx->rsp = uNewRsp;
7622 }
7623
7624 return rc;
7625}
7626
7627
7628/**
7629 * Pops a qword from the stack.
7630 *
7631 * @returns Strict VBox status code.
7632 * @param pIemCpu The IEM per CPU data.
7633 * @param pu64Value Where to store the popped value.
7634 */
7635static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7636{
7637 /* Increment the stack pointer. */
7638 uint64_t uNewRsp;
7639 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7640 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7641
7642 /* Read the qword the lazy way. */
7643 uint64_t const *pu64Src;
7644 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7645 if (rc == VINF_SUCCESS)
7646 {
7647 *pu64Value = *pu64Src;
7648 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7649
7650 /* Commit the new RSP value. */
7651 if (rc == VINF_SUCCESS)
7652 pCtx->rsp = uNewRsp;
7653 }
7654
7655 return rc;
7656}
7657
7658
7659/**
7660 * Pushes a word onto the stack, using a temporary stack pointer.
7661 *
7662 * @returns Strict VBox status code.
7663 * @param pIemCpu The IEM per CPU data.
7664 * @param u16Value The value to push.
7665 * @param pTmpRsp Pointer to the temporary stack pointer.
7666 */
7667static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7668{
7669 /* Decrement the stack pointer. */
7670 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7671 RTUINT64U NewRsp = *pTmpRsp;
7672 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7673
7674 /* Write the word the lazy way. */
7675 uint16_t *pu16Dst;
7676 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7677 if (rc == VINF_SUCCESS)
7678 {
7679 *pu16Dst = u16Value;
7680 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7681 }
7682
7683 /* Commit the new RSP value unless an access handler made trouble. */
7684 if (rc == VINF_SUCCESS)
7685 *pTmpRsp = NewRsp;
7686
7687 return rc;
7688}
7689
7690
7691/**
7692 * Pushes a dword onto the stack, using a temporary stack pointer.
7693 *
7694 * @returns Strict VBox status code.
7695 * @param pIemCpu The IEM per CPU data.
7696 * @param u32Value The value to push.
7697 * @param pTmpRsp Pointer to the temporary stack pointer.
7698 */
7699static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7700{
7701 /* Decrement the stack pointer. */
7702 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7703 RTUINT64U NewRsp = *pTmpRsp;
7704 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7705
7706 /* Write the dword the lazy way. */
7707 uint32_t *pu32Dst;
7708 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7709 if (rc == VINF_SUCCESS)
7710 {
7711 *pu32Dst = u32Value;
7712 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7713 }
7714
7715 /* Commit the new RSP value unless an access handler made trouble. */
7716 if (rc == VINF_SUCCESS)
7717 *pTmpRsp = NewRsp;
7718
7719 return rc;
7720}
7721
7722
7723/**
7724 * Pushes a qword onto the stack, using a temporary stack pointer.
7725 *
7726 * @returns Strict VBox status code.
7727 * @param pIemCpu The IEM per CPU data.
7728 * @param u64Value The value to push.
7729 * @param pTmpRsp Pointer to the temporary stack pointer.
7730 */
7731static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7732{
7733 /* Decrement the stack pointer. */
7734 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7735 RTUINT64U NewRsp = *pTmpRsp;
7736 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7737
7738 /* Write the qword the lazy way. */
7739 uint64_t *pu64Dst;
7740 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7741 if (rc == VINF_SUCCESS)
7742 {
7743 *pu64Dst = u64Value;
7744 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7745 }
7746
7747 /* Commit the new RSP value unless an access handler made trouble. */
7748 if (rc == VINF_SUCCESS)
7749 *pTmpRsp = NewRsp;
7750
7751 return rc;
7752}
7753
7754
7755/**
7756 * Pops a word from the stack, using a temporary stack pointer.
7757 *
7758 * @returns Strict VBox status code.
7759 * @param pIemCpu The IEM per CPU data.
7760 * @param pu16Value Where to store the popped value.
7761 * @param pTmpRsp Pointer to the temporary stack pointer.
7762 */
7763static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7764{
7765 /* Increment the stack pointer. */
7766 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7767 RTUINT64U NewRsp = *pTmpRsp;
7768 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7769
7770 /* Read the word the lazy way. */
7771 uint16_t const *pu16Src;
7772 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7773 if (rc == VINF_SUCCESS)
7774 {
7775 *pu16Value = *pu16Src;
7776 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7777
7778 /* Commit the new RSP value. */
7779 if (rc == VINF_SUCCESS)
7780 *pTmpRsp = NewRsp;
7781 }
7782
7783 return rc;
7784}
7785
7786
7787/**
7788 * Pops a dword from the stack, using a temporary stack pointer.
7789 *
7790 * @returns Strict VBox status code.
7791 * @param pIemCpu The IEM per CPU data.
7792 * @param pu32Value Where to store the popped value.
7793 * @param pTmpRsp Pointer to the temporary stack pointer.
7794 */
7795static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7796{
7797 /* Increment the stack pointer. */
7798 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7799 RTUINT64U NewRsp = *pTmpRsp;
7800 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7801
7802 /* Read the dword the lazy way. */
7803 uint32_t const *pu32Src;
7804 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7805 if (rc == VINF_SUCCESS)
7806 {
7807 *pu32Value = *pu32Src;
7808 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7809
7810 /* Commit the new RSP value. */
7811 if (rc == VINF_SUCCESS)
7812 *pTmpRsp = NewRsp;
7813 }
7814
7815 return rc;
7816}
7817
7818
7819/**
7820 * Pops a qword from the stack, using a temporary stack pointer.
7821 *
7822 * @returns Strict VBox status code.
7823 * @param pIemCpu The IEM per CPU data.
7824 * @param pu64Value Where to store the popped value.
7825 * @param pTmpRsp Pointer to the temporary stack pointer.
7826 */
7827static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7828{
7829 /* Increment the stack pointer. */
7830 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7831 RTUINT64U NewRsp = *pTmpRsp;
7832 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7833
7834 /* Read the qword the lazy way. */
7835 uint64_t const *pu64Src;
7836 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7837 if (rcStrict == VINF_SUCCESS)
7838 {
7839 *pu64Value = *pu64Src;
7840 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7841
7842 /* Commit the new RSP value. */
7843 if (rcStrict == VINF_SUCCESS)
7844 *pTmpRsp = NewRsp;
7845 }
7846
7847 return rcStrict;
7848}
7849
7850
7851/**
7852 * Begin a special stack push (used by interrupts, exceptions and such).
7853 *
7854 * This will raise \#SS or \#PF if appropriate.
7855 *
7856 * @returns Strict VBox status code.
7857 * @param pIemCpu The IEM per CPU data.
7858 * @param cbMem The number of bytes to push onto the stack.
7859 * @param ppvMem Where to return the pointer to the stack memory.
7860 * As with the other memory functions this could be
7861 * direct access or bounce buffered access, so
7862 * don't commit register until the commit call
7863 * succeeds.
7864 * @param puNewRsp Where to return the new RSP value. This must be
7865 * passed unchanged to
7866 * iemMemStackPushCommitSpecial().
7867 */
7868static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
7869{
7870 Assert(cbMem < UINT8_MAX);
7871 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7872 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
7873 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7874}
7875
7876
7877/**
7878 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7879 *
7880 * This will update the rSP.
7881 *
7882 * @returns Strict VBox status code.
7883 * @param pIemCpu The IEM per CPU data.
7884 * @param pvMem The pointer returned by
7885 * iemMemStackPushBeginSpecial().
7886 * @param uNewRsp The new RSP value returned by
7887 * iemMemStackPushBeginSpecial().
7888 */
7889static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
7890{
7891 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
7892 if (rcStrict == VINF_SUCCESS)
7893 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
7894 return rcStrict;
7895}
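
/*
 * A minimal usage sketch (not part of the original code) of the two-phase
 * special push protocol above: reserve the frame, fill it, then commit both
 * the memory and RSP in one go.  The helper name and the three pushed words
 * are illustrative assumptions.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePushRealModeFrame(PIEMCPU pIemCpu, uint16_t uFlags, uint16_t uSelCs, uint16_t uIp)
{
    uint16_t    *pau16Frame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau16Frame[2] = uFlags;             /* highest address of the frame */
    pau16Frame[1] = uSelCs;
    pau16Frame[0] = uIp;                /* lowest address (new top of stack) */
    /* Only now is RSP updated; nothing is committed if the mapping failed above. */
    return iemMemStackPushCommitSpecial(pIemCpu, pau16Frame, uNewRsp);
}
#endif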
7896
7897
7898/**
7899 * Begin a special stack pop (used by iret, retf and such).
7900 *
7901 * This will raise \#SS or \#PF if appropriate.
7902 *
7903 * @returns Strict VBox status code.
7904 * @param pIemCpu The IEM per CPU data.
7905 * @param cbMem The number of bytes to pop from the stack.
7906 * @param ppvMem Where to return the pointer to the stack memory.
7907 * @param puNewRsp Where to return the new RSP value. This must be
7908 * passed unchanged to
7909 * iemMemStackPopCommitSpecial() or applied
7910 * manually if iemMemStackPopDoneSpecial() is used.
7911 */
7912static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
7913{
7914 Assert(cbMem < UINT8_MAX);
7915 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7916 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
7917 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7918}
7919
7920
7921/**
7922 * Continue a special stack pop (used by iret and retf).
7923 *
7924 * This will raise \#SS or \#PF if appropriate.
7925 *
7926 * @returns Strict VBox status code.
7927 * @param pIemCpu The IEM per CPU data.
7928 * @param cbMem The number of bytes to pop from the stack.
7929 * @param ppvMem Where to return the pointer to the stack memory.
7930 * @param puNewRsp Where to return the new RSP value. This must be
7931 * passed unchanged to
7932 * iemMemStackPopCommitSpecial() or applied
7933 * manually if iemMemStackPopDoneSpecial() is used.
7934 */
7935static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
7936{
7937 Assert(cbMem < UINT8_MAX);
7938 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7939 RTUINT64U NewRsp;
7940 NewRsp.u = *puNewRsp;
7941 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7942 *puNewRsp = NewRsp.u;
7943 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7944}
7945
7946
7947/**
7948 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
7949 *
7950 * This will update the rSP.
7951 *
7952 * @returns Strict VBox status code.
7953 * @param pIemCpu The IEM per CPU data.
7954 * @param pvMem The pointer returned by
7955 * iemMemStackPopBeginSpecial().
7956 * @param uNewRsp The new RSP value returned by
7957 * iemMemStackPopBeginSpecial().
7958 */
7959static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
7960{
7961 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7962 if (rcStrict == VINF_SUCCESS)
7963 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
7964 return rcStrict;
7965}
7966
7967
7968/**
7969 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7970 * iemMemStackPopContinueSpecial).
7971 *
7972 * The caller will manually commit the rSP.
7973 *
7974 * @returns Strict VBox status code.
7975 * @param pIemCpu The IEM per CPU data.
7976 * @param pvMem The pointer returned by
7977 * iemMemStackPopBeginSpecial() or
7978 * iemMemStackPopContinueSpecial().
7979 */
7980static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
7981{
7982 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7983}
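
/*
 * A minimal usage sketch (not part of the original code) of the special pop
 * helpers in the variant where the caller commits RSP manually after the
 * "done" call, as the comments above describe.  The helper name is an
 * illustrative assumption.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePopRealModeFrame(PIEMCPU pIemCpu, uint16_t *puIp, uint16_t *puSelCs, uint16_t *puFlags)
{
    PCPUMCTX         pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint16_t const  *pau16Frame;
    uint64_t         uNewRsp;
    VBOXSTRICTRC     rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, (void const **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puIp    = pau16Frame[0];
    *puSelCs = pau16Frame[1];
    *puFlags = pau16Frame[2];
    rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pau16Frame);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = uNewRsp;            /* the caller applies the new RSP itself */
    return rcStrict;
}
#endif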
7984
7985
7986/**
7987 * Fetches a system table byte.
7988 *
7989 * @returns Strict VBox status code.
7990 * @param pIemCpu The IEM per CPU data.
7991 * @param pbDst Where to return the byte.
7992 * @param iSegReg The index of the segment register to use for
7993 * this access. The base and limits are checked.
7994 * @param GCPtrMem The address of the guest memory.
7995 */
7996static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7997{
7998 /* The lazy approach for now... */
7999 uint8_t const *pbSrc;
8000 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8001 if (rc == VINF_SUCCESS)
8002 {
8003 *pbDst = *pbSrc;
8004 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8005 }
8006 return rc;
8007}
8008
8009
8010/**
8011 * Fetches a system table word.
8012 *
8013 * @returns Strict VBox status code.
8014 * @param pIemCpu The IEM per CPU data.
8015 * @param pu16Dst Where to return the word.
8016 * @param iSegReg The index of the segment register to use for
8017 * this access. The base and limits are checked.
8018 * @param GCPtrMem The address of the guest memory.
8019 */
8020static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8021{
8022 /* The lazy approach for now... */
8023 uint16_t const *pu16Src;
8024 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8025 if (rc == VINF_SUCCESS)
8026 {
8027 *pu16Dst = *pu16Src;
8028 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8029 }
8030 return rc;
8031}
8032
8033
8034/**
8035 * Fetches a system table dword.
8036 *
8037 * @returns Strict VBox status code.
8038 * @param pIemCpu The IEM per CPU data.
8039 * @param pu32Dst Where to return the dword.
8040 * @param iSegReg The index of the segment register to use for
8041 * this access. The base and limits are checked.
8042 * @param GCPtrMem The address of the guest memory.
8043 */
8044static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8045{
8046 /* The lazy approach for now... */
8047 uint32_t const *pu32Src;
8048 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8049 if (rc == VINF_SUCCESS)
8050 {
8051 *pu32Dst = *pu32Src;
8052 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8053 }
8054 return rc;
8055}
8056
8057
8058/**
8059 * Fetches a system table qword.
8060 *
8061 * @returns Strict VBox status code.
8062 * @param pIemCpu The IEM per CPU data.
8063 * @param pu64Dst Where to return the qword.
8064 * @param iSegReg The index of the segment register to use for
8065 * this access. The base and limits are checked.
8066 * @param GCPtrMem The address of the guest memory.
8067 */
8068static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8069{
8070 /* The lazy approach for now... */
8071 uint64_t const *pu64Src;
8072 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8073 if (rc == VINF_SUCCESS)
8074 {
8075 *pu64Dst = *pu64Src;
8076 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8077 }
8078 return rc;
8079}
8080
8081
8082/**
8083 * Fetches a descriptor table entry with caller specified error code.
8084 *
8085 * @returns Strict VBox status code.
8086 * @param pIemCpu The IEM per CPU.
8087 * @param pDesc Where to return the descriptor table entry.
8088 * @param uSel The selector which table entry to fetch.
8089 * @param uXcpt The exception to raise on table lookup error.
8090 * @param uErrorCode The error code associated with the exception.
8091 */
8092static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt,
8093 uint16_t uErrorCode)
8094{
8095 AssertPtr(pDesc);
8096 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8097
8098 /** @todo did the 286 require all 8 bytes to be accessible? */
8099 /*
8100 * Get the selector table base and check bounds.
8101 */
8102 RTGCPTR GCPtrBase;
8103 if (uSel & X86_SEL_LDT)
8104 {
8105 if ( !pCtx->ldtr.Attr.n.u1Present
8106 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8107 {
8108 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8109 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8110 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8111 uErrorCode, 0);
8112 }
8113
8114 Assert(pCtx->ldtr.Attr.n.u1Present);
8115 GCPtrBase = pCtx->ldtr.u64Base;
8116 }
8117 else
8118 {
8119 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8120 {
8121 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8122 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8123 uErrorCode, 0);
8124 }
8125 GCPtrBase = pCtx->gdtr.pGdt;
8126 }
8127
8128 /*
8129 * Read the legacy descriptor and maybe the long mode extensions if
8130 * required.
8131 */
8132 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8133 if (rcStrict == VINF_SUCCESS)
8134 {
8135 if ( !IEM_IS_LONG_MODE(pIemCpu)
8136 || pDesc->Legacy.Gen.u1DescType)
8137 pDesc->Long.au64[1] = 0;
8138 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
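            /* Note: (uSel | X86_SEL_RPL_LDT) + 1 equals (uSel & X86_SEL_MASK) + 8, i.e. the
               byte offset of the upper 8-byte half of the 16-byte long mode descriptor. */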
8139 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8140 else
8141 {
8142 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8143 /** @todo is this the right exception? */
8144 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8145 }
8146 }
8147 return rcStrict;
8148}
8149
8150
8151/**
8152 * Fetches a descriptor table entry.
8153 *
8154 * @returns Strict VBox status code.
8155 * @param pIemCpu The IEM per CPU.
8156 * @param pDesc Where to return the descriptor table entry.
8157 * @param uSel The selector which table entry to fetch.
8158 * @param uXcpt The exception to raise on table lookup error.
8159 */
8160static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8161{
8162 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8163}
8164
8165
8166/**
8167 * Fakes a long mode stack selector for SS = 0.
8168 *
8169 * @param pDescSs Where to return the fake stack descriptor.
8170 * @param uDpl The DPL we want.
8171 */
8172static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8173{
8174 pDescSs->Long.au64[0] = 0;
8175 pDescSs->Long.au64[1] = 0;
8176 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8177 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8178 pDescSs->Long.Gen.u2Dpl = uDpl;
8179 pDescSs->Long.Gen.u1Present = 1;
8180 pDescSs->Long.Gen.u1Long = 1;
8181}
8182
8183
8184/**
8185 * Marks the selector descriptor as accessed (only non-system descriptors).
8186 *
8187 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8188 * will therefore skip the limit checks.
8189 *
8190 * @returns Strict VBox status code.
8191 * @param pIemCpu The IEM per CPU.
8192 * @param uSel The selector.
8193 */
8194static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8195{
8196 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8197
8198 /*
8199 * Get the selector table base and calculate the entry address.
8200 */
8201 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8202 ? pCtx->ldtr.u64Base
8203 : pCtx->gdtr.pGdt;
8204 GCPtr += uSel & X86_SEL_MASK;
8205
8206 /*
8207 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8208 * ugly stuff to avoid this. This will make sure it's an atomic access
8209 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8210 */
8211 VBOXSTRICTRC rcStrict;
8212 uint32_t volatile *pu32;
8213 if ((GCPtr & 3) == 0)
8214 {
8215 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8216 GCPtr += 2 + 2;
8217 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8218 if (rcStrict != VINF_SUCCESS)
8219 return rcStrict;
8220        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8221 }
8222 else
8223 {
8224 /* The misaligned GDT/LDT case, map the whole thing. */
8225 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8226 if (rcStrict != VINF_SUCCESS)
8227 return rcStrict;
8228 switch ((uintptr_t)pu32 & 3)
8229 {
8230 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8231 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8232 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8233 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8234 }
8235 }
8236
8237 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8238}
8239
8240/** @} */
8241
8242
8243/*
8244 * Include the C/C++ implementation of instruction.
8245 */
8246#include "IEMAllCImpl.cpp.h"
8247
8248
8249
8250/** @name "Microcode" macros.
8251 *
8252 * The idea is that we should be able to use the same code both to interpret
8253 * instructions and to feed a recompiler. Thus this obfuscation.
8254 *
8255 * @{
8256 */
8257#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8258#define IEM_MC_END() }
8259#define IEM_MC_PAUSE() do {} while (0)
8260#define IEM_MC_CONTINUE() do {} while (0)
8261
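/*
 * Illustrative sketch (kept in an #if 0 block, never compiled) of how an
 * opcode decoder routine typically strings these microcode macros together.
 * The register-to-register move below is only a rough outline: bRm is assumed
 * to have been fetched by the decoder, and the exact register indexing varies
 * from instruction to instruction.
 */
#if 0
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint16_t, u16Value);
        IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif
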
8262/** Internal macro. */
8263#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8264 do \
8265 { \
8266 VBOXSTRICTRC rcStrict2 = a_Expr; \
8267 if (rcStrict2 != VINF_SUCCESS) \
8268 return rcStrict2; \
8269 } while (0)
8270
8271#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8272#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8273#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8274#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8275#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8276#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8277#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8278
8279#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8280#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8281 do { \
8282 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8283 return iemRaiseDeviceNotAvailable(pIemCpu); \
8284 } while (0)
8285#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8286 do { \
8287 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
8288 return iemRaiseMathFault(pIemCpu); \
8289 } while (0)
8290#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8291 do { \
8292 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8293 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFSXR) \
8294 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2) ) \
8295 return iemRaiseUndefinedOpcode(pIemCpu); \
8296 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8297 return iemRaiseDeviceNotAvailable(pIemCpu); \
8298 } while (0)
8299#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8300 do { \
8301 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8302 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MMX) ) \
8303 return iemRaiseUndefinedOpcode(pIemCpu); \
8304 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8305 return iemRaiseDeviceNotAvailable(pIemCpu); \
8306 } while (0)
8307#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8308 do { \
8309 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8310 || ( !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE) \
8311 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_AXMMX) ) ) \
8312 return iemRaiseUndefinedOpcode(pIemCpu); \
8313 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8314 return iemRaiseDeviceNotAvailable(pIemCpu); \
8315 } while (0)
8316#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8317 do { \
8318 if (pIemCpu->uCpl != 0) \
8319 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8320 } while (0)
8321
8322
8323#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8324#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8325#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8326#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8327#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8328#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8329#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8330 uint32_t a_Name; \
8331 uint32_t *a_pName = &a_Name
8332#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8333 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8334
8335#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8336#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8337
8338#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8339#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8340#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8341#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8342#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8343#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8344#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8345#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8346#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8347#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8348#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8349#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8350#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8351#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8352#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8353#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8354#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8355#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8356#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8357#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8358#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8359#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8360#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8361#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8362#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8363#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8364#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8365#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8366#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8367/** @note Not for IOPL or IF testing or modification. */
8368#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8369#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8370#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
8371#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
8372
8373#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8374#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8375#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8376#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8377#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8378#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8379#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8380#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8381#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8382#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8383#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8384 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8385
8386#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8387#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8388/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8389 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8390#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8391#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8392/** @note Not for IOPL or IF testing or modification. */
8393#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8394
8395#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8396#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8397#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8398 do { \
8399 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8400 *pu32Reg += (a_u32Value); \
8401        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8402 } while (0)
8403#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8404
8405#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8406#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8407#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8408 do { \
8409 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8410 *pu32Reg -= (a_u32Value); \
8411        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8412 } while (0)
8413#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8414
8415#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8416#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8417#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8418#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8419#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8420#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8421#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8422
8423#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8424#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8425#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8426#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8427
8428#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8429#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8430#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8431
8432#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8433#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8434
8435#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8436#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8437#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8438
8439#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8440#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8441#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8442
8443#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8444
8445#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8446
8447#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8448#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8449#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8450 do { \
8451 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8452 *pu32Reg &= (a_u32Value); \
8453        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8454 } while (0)
8455#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8456
8457#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8458#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8459#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8460 do { \
8461 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8462 *pu32Reg |= (a_u32Value); \
8463        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8464 } while (0)
8465#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8466
8467
8468/** @note Not for IOPL or IF modification. */
8469#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8470/** @note Not for IOPL or IF modification. */
8471#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8472/** @note Not for IOPL or IF modification. */
8473#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8474
8475#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8476
8477
8478#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8479 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx; } while (0)
8480#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8481 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].au32[0]; } while (0)
8482#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8483 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8484#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8485 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8486#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8487 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
8488#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8489 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
8490#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8491 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
8492
8493#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8494 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm; } while (0)
8495#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8496 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0]; } while (0)
8497#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8498 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au32[0]; } while (0)
8499#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8500 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8501#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8502 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8503 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
8504 } while (0)
8505#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8506 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8507 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
8508 } while (0)
8509#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8510 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
8511#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8512 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
8513#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8514 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0])
8515
8516#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8517 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8518#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8519 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8520#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8521 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8522
8523#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8524 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8525#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8526 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8527#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8528 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8529
8530#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8531 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8532#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8533 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8534#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8535 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8536
8537#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8538 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8539
8540#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8541 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8542#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8543 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8544#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8545 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8546
8547#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8548 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8549#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8550 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8551#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8552 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8553
8554#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8555 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8556#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8557 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8558
8559
8560
8561#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8562 do { \
8563 uint8_t u8Tmp; \
8564 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8565 (a_u16Dst) = u8Tmp; \
8566 } while (0)
8567#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8568 do { \
8569 uint8_t u8Tmp; \
8570 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8571 (a_u32Dst) = u8Tmp; \
8572 } while (0)
8573#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8574 do { \
8575 uint8_t u8Tmp; \
8576 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8577 (a_u64Dst) = u8Tmp; \
8578 } while (0)
8579#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8580 do { \
8581 uint16_t u16Tmp; \
8582 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8583 (a_u32Dst) = u16Tmp; \
8584 } while (0)
8585#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8586 do { \
8587 uint16_t u16Tmp; \
8588 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8589 (a_u64Dst) = u16Tmp; \
8590 } while (0)
8591#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8592 do { \
8593 uint32_t u32Tmp; \
8594 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8595 (a_u64Dst) = u32Tmp; \
8596 } while (0)
8597
8598#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8599 do { \
8600 uint8_t u8Tmp; \
8601 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8602 (a_u16Dst) = (int8_t)u8Tmp; \
8603 } while (0)
8604#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8605 do { \
8606 uint8_t u8Tmp; \
8607 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8608 (a_u32Dst) = (int8_t)u8Tmp; \
8609 } while (0)
8610#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8611 do { \
8612 uint8_t u8Tmp; \
8613 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8614 (a_u64Dst) = (int8_t)u8Tmp; \
8615 } while (0)
8616#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8617 do { \
8618 uint16_t u16Tmp; \
8619 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8620 (a_u32Dst) = (int16_t)u16Tmp; \
8621 } while (0)
8622#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8623 do { \
8624 uint16_t u16Tmp; \
8625 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8626 (a_u64Dst) = (int16_t)u16Tmp; \
8627 } while (0)
8628#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8629 do { \
8630 uint32_t u32Tmp; \
8631 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8632 (a_u64Dst) = (int32_t)u32Tmp; \
8633 } while (0)
8634
8635#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8636 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8637#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8638 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8639#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8640 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8641#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8642 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8643
8644#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8645 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8646#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8647 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8648#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8649 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8650#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8651 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8652
8653#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8654#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8655#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8656#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8657#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8658#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8659#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8660 do { \
8661 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8662 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8663 } while (0)
8664
8665#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8666 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8667#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8668 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8669
8670
8671#define IEM_MC_PUSH_U16(a_u16Value) \
8672 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8673#define IEM_MC_PUSH_U32(a_u32Value) \
8674 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8675#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8676 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8677#define IEM_MC_PUSH_U64(a_u64Value) \
8678 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8679
8680#define IEM_MC_POP_U16(a_pu16Value) \
8681 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8682#define IEM_MC_POP_U32(a_pu32Value) \
8683 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8684#define IEM_MC_POP_U64(a_pu64Value) \
8685 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8686
8687/** Maps guest memory for direct or bounce buffered access.
8688 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8689 * @remarks May return.
8690 */
8691#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8692 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8693
8694/** Maps guest memory for direct or bounce buffered access.
8695 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8696 * @remarks May return.
8697 */
8698#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8699 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8700
8701/** Commits the memory and unmaps the guest memory.
8702 * @remarks May return.
8703 */
8704#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8705 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
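
/*
 * Illustrative sketch (never compiled) of the usual map / modify / commit
 * sequence for a read-modify-write memory destination.  bRm is assumed to
 * have been fetched by the decoder, and iemAImpl_add_u16 merely stands in
 * for whatever assembly worker the instruction actually uses.
 */
#if 0
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint16_t *,  pu16Dst,           0);
        IEM_MC_ARG(uint16_t,    u16Src,            1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,   2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif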
8706
8707/** Commits the memory and unmaps the guest memory unless the FPU status word
8708 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8709 * would cause FLD not to store.
8710 *
8711 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8712 * store, while \#P will not.
8713 *
8714 * @remarks May in theory return - for now.
8715 */
8716#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8717 do { \
8718 if ( !(a_u16FSW & X86_FSW_ES) \
8719 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8720 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
8721 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8722 } while (0)
8723
8724/** Calculate efficient address from R/M. */
8725#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8726 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8727
8728#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8729#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8730#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8731#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8732#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8733#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8734#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8735
8736/**
8737 * Defers the rest of the instruction emulation to a C implementation routine
8738 * and returns, only taking the standard parameters.
8739 *
8740 * @param a_pfnCImpl The pointer to the C routine.
8741 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8742 */
8743#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8744
8745/**
8746 * Defers the rest of instruction emulation to a C implementation routine and
8747 * returns, taking one argument in addition to the standard ones.
8748 *
8749 * @param a_pfnCImpl The pointer to the C routine.
8750 * @param a0 The argument.
8751 */
8752#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8753
8754/**
8755 * Defers the rest of the instruction emulation to a C implementation routine
8756 * and returns, taking two arguments in addition to the standard ones.
8757 *
8758 * @param a_pfnCImpl The pointer to the C routine.
8759 * @param a0 The first extra argument.
8760 * @param a1 The second extra argument.
8761 */
8762#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8763
8764/**
8765 * Defers the rest of the instruction emulation to a C implementation routine
8766 * and returns, taking three arguments in addition to the standard ones.
8767 *
8768 * @param a_pfnCImpl The pointer to the C routine.
8769 * @param a0 The first extra argument.
8770 * @param a1 The second extra argument.
8771 * @param a2 The third extra argument.
8772 */
8773#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8774
8775/**
8776 * Defers the rest of the instruction emulation to a C implementation routine
8777 * and returns, taking four arguments in addition to the standard ones.
8778 *
8779 * @param a_pfnCImpl The pointer to the C routine.
8780 * @param a0 The first extra argument.
8781 * @param a1 The second extra argument.
8782 * @param a2 The third extra argument.
8783 * @param a3 The fourth extra argument.
8784 */
8785#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8786
8787/**
8788 * Defers the rest of the instruction emulation to a C implementation routine
8789 * and returns, taking five arguments in addition to the standard ones.
8790 *
8791 * @param a_pfnCImpl The pointer to the C routine.
8792 * @param a0 The first extra argument.
8793 * @param a1 The second extra argument.
8794 * @param a2 The third extra argument.
8795 * @param a3 The fourth extra argument.
8796 * @param a4 The fifth extra argument.
8797 */
8798#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8799
8800/**
8801 * Defers the entire instruction emulation to a C implementation routine and
8802 * returns, only taking the standard parameters.
8803 *
8804 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8805 *
8806 * @param a_pfnCImpl The pointer to the C routine.
8807 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8808 */
8809#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8810
8811/**
8812 * Defers the entire instruction emulation to a C implementation routine and
8813 * returns, taking one argument in addition to the standard ones.
8814 *
8815 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8816 *
8817 * @param a_pfnCImpl The pointer to the C routine.
8818 * @param a0 The argument.
8819 */
8820#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8821
8822/**
8823 * Defers the entire instruction emulation to a C implementation routine and
8824 * returns, taking two arguments in addition to the standard ones.
8825 *
8826 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8827 *
8828 * @param a_pfnCImpl The pointer to the C routine.
8829 * @param a0 The first extra argument.
8830 * @param a1 The second extra argument.
8831 */
8832#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8833
8834/**
8835 * Defers the entire instruction emulation to a C implementation routine and
8836 * returns, taking three arguments in addition to the standard ones.
8837 *
8838 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8839 *
8840 * @param a_pfnCImpl The pointer to the C routine.
8841 * @param a0 The first extra argument.
8842 * @param a1 The second extra argument.
8843 * @param a2 The third extra argument.
8844 */
8845#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
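
/*
 * Illustrative sketch: an instruction that defers everything to a C worker is
 * decoded without any IEM_MC_BEGIN/IEM_MC_END block and simply does something
 * along the lines of the following (iemCImpl_hlt only stands in for whatever
 * C implementation routine the instruction uses):
 *
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 */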
8846
8847/**
8848 * Calls a FPU assembly implementation taking one visible argument.
8849 *
8850 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8851 * @param a0 The first extra argument.
8852 */
8853#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
8854 do { \
8855 iemFpuPrepareUsage(pIemCpu); \
8856 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
8857 } while (0)
8858
8859/**
8860 * Calls a FPU assembly implementation taking two visible arguments.
8861 *
8862 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8863 * @param a0 The first extra argument.
8864 * @param a1 The second extra argument.
8865 */
8866#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
8867 do { \
8868 iemFpuPrepareUsage(pIemCpu); \
8869 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
8870 } while (0)
8871
8872/**
8873 * Calls a FPU assembly implementation taking three visible arguments.
8874 *
8875 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8876 * @param a0 The first extra argument.
8877 * @param a1 The second extra argument.
8878 * @param a2 The third extra argument.
8879 */
8880#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
8881 do { \
8882 iemFpuPrepareUsage(pIemCpu); \
8883 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
8884 } while (0)
8885
8886#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
8887 do { \
8888 (a_FpuData).FSW = (a_FSW); \
8889 (a_FpuData).r80Result = *(a_pr80Value); \
8890 } while (0)
8891
8892/** Pushes FPU result onto the stack. */
8893#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
8894 iemFpuPushResult(pIemCpu, &a_FpuData)
8895/** Pushes FPU result onto the stack and sets the FPUDP. */
8896#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
8897 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
8898
8899/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
8900#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
8901 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
8902
8903/** Stores FPU result in a stack register. */
8904#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
8905 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
8906/** Stores FPU result in a stack register and pops the stack. */
8907#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
8908 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
8909/** Stores FPU result in a stack register and sets the FPUDP. */
8910#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
8911 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
8912/** Stores FPU result in a stack register, sets the FPUDP, and pops the
8913 * stack. */
8914#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
8915 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
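
/*
 * Illustrative sketch (never compiled) of how the FPU result macros combine
 * with the stack register tests defined further down for a typical
 * "ST0 <- ST0 op STn" style instruction.  iemAImpl_fadd_r80_by_r80 merely
 * stands in for the assembly worker an actual instruction would use.
 */
#if 0
        IEM_MC_BEGIN(3, 1);
        IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
        IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes,     0);
        IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,             1);
        IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,             2);
        IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
        IEM_MC_MAYBE_RAISE_FPU_XCPT();
        IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1) {
            IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
            IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
        } IEM_MC_ELSE() {
            IEM_MC_FPU_STACK_UNDERFLOW(0);
        } IEM_MC_ENDIF();
        IEM_MC_USED_FPU();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif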
8916
8917/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
8918#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
8919 iemFpuUpdateOpcodeAndIp(pIemCpu)
8920/** Free a stack register (for FFREE and FFREEP). */
8921#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
8922 iemFpuStackFree(pIemCpu, a_iStReg)
8923/** Increment the FPU stack pointer. */
8924#define IEM_MC_FPU_STACK_INC_TOP() \
8925 iemFpuStackIncTop(pIemCpu)
8926/** Decrement the FPU stack pointer. */
8927#define IEM_MC_FPU_STACK_DEC_TOP() \
8928 iemFpuStackDecTop(pIemCpu)
8929
8930/** Updates the FSW, FOP, FPUIP, and FPUCS. */
8931#define IEM_MC_UPDATE_FSW(a_u16FSW) \
8932 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
8933/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
8934#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
8935 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
8936/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
8937#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
8938 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
8939/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
8940#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
8941 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
8942/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
8943 * stack. */
8944#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
8945 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
8946/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
8947#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
8948    iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
8949
8950/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
8951#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
8952 iemFpuStackUnderflow(pIemCpu, a_iStDst)
8953/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
8954 * stack. */
8955#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
8956 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
8957/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
8958 * FPUDS. */
8959#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8960 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8961/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
8962 * FPUDS. Pops stack. */
8963#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8964 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8965/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
8966 * stack twice. */
8967#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
8968 iemFpuStackUnderflowThenPopPop(pIemCpu)
8969/** Raises a FPU stack underflow exception for an instruction pushing a result
8970 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
8971#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
8972 iemFpuStackPushUnderflow(pIemCpu)
8973/** Raises a FPU stack underflow exception for an instruction pushing a result
8974 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
8975#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
8976 iemFpuStackPushUnderflowTwo(pIemCpu)
8977
8978/** Raises a FPU stack overflow exception as part of a push attempt. Sets
8979 * FPUIP, FPUCS and FOP. */
8980#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
8981 iemFpuStackPushOverflow(pIemCpu)
8982/** Raises a FPU stack overflow exception as part of a push attempt. Sets
8983 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
8984#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
8985 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
8986/** Indicates that we (might) have modified the FPU state. */
8987#define IEM_MC_USED_FPU() \
8988 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
8989
8990/**
8991 * Calls a MMX assembly implementation taking two visible arguments.
8992 *
8993 * @param a_pfnAImpl Pointer to the assembly MMX routine.
8994 * @param a0 The first extra argument.
8995 * @param a1 The second extra argument.
8996 */
8997#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
8998 do { \
8999 iemFpuPrepareUsage(pIemCpu); \
9000 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
9001 } while (0)
9002
9003/**
9004 * Calls a MMX assembly implementation taking three visible arguments.
9005 *
9006 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9007 * @param a0 The first extra argument.
9008 * @param a1 The second extra argument.
9009 * @param a2 The third extra argument.
9010 */
9011#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9012 do { \
9013 iemFpuPrepareUsage(pIemCpu); \
9014 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
9015 } while (0)
9016
9017
9018/**
9019 * Calls a SSE assembly implementation taking two visible arguments.
9020 *
9021 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9022 * @param a0 The first extra argument.
9023 * @param a1 The second extra argument.
9024 */
9025#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9026 do { \
9027 iemFpuPrepareUsageSse(pIemCpu); \
9028 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
9029 } while (0)
9030
9031/**
9032 * Calls a SSE assembly implementation taking three visible arguments.
9033 *
9034 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9035 * @param a0 The first extra argument.
9036 * @param a1 The second extra argument.
9037 * @param a2 The third extra argument.
9038 */
9039#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9040 do { \
9041 iemFpuPrepareUsageSse(pIemCpu); \
9042 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
9043 } while (0)
9044
9045
9046/** @note Not for IOPL or IF testing. */
9047#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9048/** @note Not for IOPL or IF testing. */
9049#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9050/** @note Not for IOPL or IF testing. */
9051#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9052/** @note Not for IOPL or IF testing. */
9053#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9054/** @note Not for IOPL or IF testing. */
9055#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9056 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9057 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9058/** @note Not for IOPL or IF testing. */
9059#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9060 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9061 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9062/** @note Not for IOPL or IF testing. */
9063#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9064 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9065 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9066 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9067/** @note Not for IOPL or IF testing. */
9068#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9069 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9070 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9071 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9072#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9073#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9074#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9075/** @note Not for IOPL or IF testing. */
9076#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9077 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9078 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9079/** @note Not for IOPL or IF testing. */
9080#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9081 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9082 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9083/** @note Not for IOPL or IF testing. */
9084#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9085 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9086 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9087/** @note Not for IOPL or IF testing. */
9088#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9089 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9090 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9091/** @note Not for IOPL or IF testing. */
9092#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9093 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9094 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9095/** @note Not for IOPL or IF testing. */
9096#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9097 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9098 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9099#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9100#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9101#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9102 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9103#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9104 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9105#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9106 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9107#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9108 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9109#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9110 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9111#define IEM_MC_IF_FCW_IM() \
9112 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
9113
9114#define IEM_MC_ELSE() } else {
9115#define IEM_MC_ENDIF() } do {} while (0)
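
/*
 * Illustrative sketch (never compiled) of how the brace-style IF/ELSE/ENDIF
 * macros compose, here for a conditional branch; the signed immediate i8Imm
 * is assumed to have been fetched by the surrounding decoder code.
 */
#if 0
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S8(i8Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
#endif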
9116
9117/** @} */
9118
9119
9120/** @name Opcode Debug Helpers.
9121 * @{
9122 */
9123#ifdef DEBUG
9124# define IEMOP_MNEMONIC(a_szMnemonic) \
9125 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9126 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9127# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9128 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9129 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9130#else
9131# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9132# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9133#endif
9134
9135/** @} */
9136
9137
9138/** @name Opcode Helpers.
9139 * @{
9140 */
9141
9142/** The instruction raises an \#UD in real and V8086 mode. */
9143#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9144 do \
9145 { \
9146 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9147 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9148 } while (0)
9149
9150/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9151 * lock prefixed.
9152 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9153#define IEMOP_HLP_NO_LOCK_PREFIX() \
9154 do \
9155 { \
9156 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9157 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9158 } while (0)
9159
9160/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9161 * 64-bit mode. */
9162#define IEMOP_HLP_NO_64BIT() \
9163 do \
9164 { \
9165 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9166 return IEMOP_RAISE_INVALID_OPCODE(); \
9167 } while (0)
9168
9169/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9170 * 64-bit mode. */
9171#define IEMOP_HLP_ONLY_64BIT() \
9172 do \
9173 { \
9174 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9175 return IEMOP_RAISE_INVALID_OPCODE(); \
9176 } while (0)
9177
9178/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9179#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9180 do \
9181 { \
9182 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9183 iemRecalEffOpSize64Default(pIemCpu); \
9184 } while (0)
9185
9186/** The instruction has 64-bit operand size if 64-bit mode. */
9187#define IEMOP_HLP_64BIT_OP_SIZE() \
9188 do \
9189 { \
9190 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9191 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9192 } while (0)
9193
9194/** Only a REX prefix immediately preceding the first opcode byte takes
9195 * effect. This macro helps ensure this and also logs bad guest code. */
9196#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9197 do \
9198 { \
9199 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9200 { \
9201 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9202 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9203 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9204 pIemCpu->uRexB = 0; \
9205 pIemCpu->uRexIndex = 0; \
9206 pIemCpu->uRexReg = 0; \
9207 iemRecalEffOpSize(pIemCpu); \
9208 } \
9209 } while (0)
9210
9211/**
9212 * Done decoding.
9213 */
9214#define IEMOP_HLP_DONE_DECODING() \
9215 do \
9216 { \
9217 /*nothing for now, maybe later... */ \
9218 } while (0)
9219
9220/**
9221 * Done decoding, raise \#UD exception if lock prefix present.
9222 */
9223#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9224 do \
9225 { \
9226 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9227 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9228 } while (0)
9229#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9230 do \
9231 { \
9232 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9233 { \
9234 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9235 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9236 } \
9237 } while (0)
9238#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9239 do \
9240 { \
9241 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9242 { \
9243 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9244 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9245 } \
9246 } while (0)
9247
9248
9249/**
9250 * Calculates the effective address of a ModR/M memory operand.
9251 *
9252 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9253 *
9254 * @return Strict VBox status code.
9255 * @param pIemCpu The IEM per CPU data.
9256 * @param bRm The ModRM byte.
9257 * @param cbImm The size of any immediate following the
9258 * effective address opcode bytes. Important for
9259 * RIP relative addressing.
9260 * @param pGCPtrEff Where to return the effective address.
9261 */
9262static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9263{
9264 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9265 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9266#define SET_SS_DEF() \
9267 do \
9268 { \
9269 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9270 pIemCpu->iEffSeg = X86_SREG_SS; \
9271 } while (0)
9272
9273 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9274 {
9275/** @todo Check the effective address size crap! */
9276 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9277 {
9278 uint16_t u16EffAddr;
9279
9280 /* Handle the disp16 form with no registers first. */
9281 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9282 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9283 else
9284 {
9285 /* Get the displacement. */
9286 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9287 {
9288 case 0: u16EffAddr = 0; break;
9289 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9290 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9291 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9292 }
9293
9294 /* Add the base and index registers to the disp. */
9295 switch (bRm & X86_MODRM_RM_MASK)
9296 {
9297 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9298 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9299 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9300 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9301 case 4: u16EffAddr += pCtx->si; break;
9302 case 5: u16EffAddr += pCtx->di; break;
9303 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9304 case 7: u16EffAddr += pCtx->bx; break;
9305 }
9306 }
9307
9308 *pGCPtrEff = u16EffAddr;
9309 }
9310 else
9311 {
9312 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9313 uint32_t u32EffAddr;
9314
9315 /* Handle the disp32 form with no registers first. */
9316 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9317 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9318 else
9319 {
9320 /* Get the register (or SIB) value. */
9321 switch ((bRm & X86_MODRM_RM_MASK))
9322 {
9323 case 0: u32EffAddr = pCtx->eax; break;
9324 case 1: u32EffAddr = pCtx->ecx; break;
9325 case 2: u32EffAddr = pCtx->edx; break;
9326 case 3: u32EffAddr = pCtx->ebx; break;
9327 case 4: /* SIB */
9328 {
9329 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9330
9331 /* Get the index and scale it. */
9332 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9333 {
9334 case 0: u32EffAddr = pCtx->eax; break;
9335 case 1: u32EffAddr = pCtx->ecx; break;
9336 case 2: u32EffAddr = pCtx->edx; break;
9337 case 3: u32EffAddr = pCtx->ebx; break;
9338 case 4: u32EffAddr = 0; /*none */ break;
9339 case 5: u32EffAddr = pCtx->ebp; break;
9340 case 6: u32EffAddr = pCtx->esi; break;
9341 case 7: u32EffAddr = pCtx->edi; break;
9342 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9343 }
9344 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9345
9346 /* add base */
9347 switch (bSib & X86_SIB_BASE_MASK)
9348 {
9349 case 0: u32EffAddr += pCtx->eax; break;
9350 case 1: u32EffAddr += pCtx->ecx; break;
9351 case 2: u32EffAddr += pCtx->edx; break;
9352 case 3: u32EffAddr += pCtx->ebx; break;
9353 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9354 case 5:
9355 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9356 {
9357 u32EffAddr += pCtx->ebp;
9358 SET_SS_DEF();
9359 }
9360 else
9361 {
9362 uint32_t u32Disp;
9363 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9364 u32EffAddr += u32Disp;
9365 }
9366 break;
9367 case 6: u32EffAddr += pCtx->esi; break;
9368 case 7: u32EffAddr += pCtx->edi; break;
9369 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9370 }
9371 break;
9372 }
9373 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9374 case 6: u32EffAddr = pCtx->esi; break;
9375 case 7: u32EffAddr = pCtx->edi; break;
9376 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9377 }
9378
9379 /* Get and add the displacement. */
9380 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9381 {
9382 case 0:
9383 break;
9384 case 1:
9385 {
9386 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9387 u32EffAddr += i8Disp;
9388 break;
9389 }
9390 case 2:
9391 {
9392 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9393 u32EffAddr += u32Disp;
9394 break;
9395 }
9396 default:
9397 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9398 }
9399
9400 }
9401 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9402 *pGCPtrEff = u32EffAddr;
9403 else
9404 {
9405 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9406 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9407 }
9408 }
9409 }
9410 else
9411 {
9412 uint64_t u64EffAddr;
9413
9414 /* Handle the rip+disp32 form with no registers first. */
9415 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9416 {
9417 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9418 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9419 }
9420 else
9421 {
9422 /* Get the register (or SIB) value. */
9423 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9424 {
9425 case 0: u64EffAddr = pCtx->rax; break;
9426 case 1: u64EffAddr = pCtx->rcx; break;
9427 case 2: u64EffAddr = pCtx->rdx; break;
9428 case 3: u64EffAddr = pCtx->rbx; break;
9429 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9430 case 6: u64EffAddr = pCtx->rsi; break;
9431 case 7: u64EffAddr = pCtx->rdi; break;
9432 case 8: u64EffAddr = pCtx->r8; break;
9433 case 9: u64EffAddr = pCtx->r9; break;
9434 case 10: u64EffAddr = pCtx->r10; break;
9435 case 11: u64EffAddr = pCtx->r11; break;
9436 case 13: u64EffAddr = pCtx->r13; break;
9437 case 14: u64EffAddr = pCtx->r14; break;
9438 case 15: u64EffAddr = pCtx->r15; break;
9439 /* SIB */
9440 case 4:
9441 case 12:
9442 {
9443 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9444
9445 /* Get the index and scale it. */
9446 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9447 {
9448 case 0: u64EffAddr = pCtx->rax; break;
9449 case 1: u64EffAddr = pCtx->rcx; break;
9450 case 2: u64EffAddr = pCtx->rdx; break;
9451 case 3: u64EffAddr = pCtx->rbx; break;
9452 case 4: u64EffAddr = 0; /*none */ break;
9453 case 5: u64EffAddr = pCtx->rbp; break;
9454 case 6: u64EffAddr = pCtx->rsi; break;
9455 case 7: u64EffAddr = pCtx->rdi; break;
9456 case 8: u64EffAddr = pCtx->r8; break;
9457 case 9: u64EffAddr = pCtx->r9; break;
9458 case 10: u64EffAddr = pCtx->r10; break;
9459 case 11: u64EffAddr = pCtx->r11; break;
9460 case 12: u64EffAddr = pCtx->r12; break;
9461 case 13: u64EffAddr = pCtx->r13; break;
9462 case 14: u64EffAddr = pCtx->r14; break;
9463 case 15: u64EffAddr = pCtx->r15; break;
9464 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9465 }
9466 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9467
9468 /* add base */
9469 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9470 {
9471 case 0: u64EffAddr += pCtx->rax; break;
9472 case 1: u64EffAddr += pCtx->rcx; break;
9473 case 2: u64EffAddr += pCtx->rdx; break;
9474 case 3: u64EffAddr += pCtx->rbx; break;
9475 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9476 case 6: u64EffAddr += pCtx->rsi; break;
9477 case 7: u64EffAddr += pCtx->rdi; break;
9478 case 8: u64EffAddr += pCtx->r8; break;
9479 case 9: u64EffAddr += pCtx->r9; break;
9480 case 10: u64EffAddr += pCtx->r10; break;
9481 case 11: u64EffAddr += pCtx->r11; break;
9482 case 12: u64EffAddr += pCtx->r12; break;
9483 case 14: u64EffAddr += pCtx->r14; break;
9484 case 15: u64EffAddr += pCtx->r15; break;
9485 /* complicated encodings */
9486 case 5:
9487 case 13:
9488 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9489 {
9490 if (!pIemCpu->uRexB)
9491 {
9492 u64EffAddr += pCtx->rbp;
9493 SET_SS_DEF();
9494 }
9495 else
9496 u64EffAddr += pCtx->r13;
9497 }
9498 else
9499 {
9500 uint32_t u32Disp;
9501 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9502 u64EffAddr += (int32_t)u32Disp;
9503 }
9504 break;
9505 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9506 }
9507 break;
9508 }
9509 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9510 }
9511
9512 /* Get and add the displacement. */
9513 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9514 {
9515 case 0:
9516 break;
9517 case 1:
9518 {
9519 int8_t i8Disp;
9520 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9521 u64EffAddr += i8Disp;
9522 break;
9523 }
9524 case 2:
9525 {
9526 uint32_t u32Disp;
9527 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9528 u64EffAddr += (int32_t)u32Disp;
9529 break;
9530 }
9531 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9532 }
9533
9534 }
9535
9536 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9537 *pGCPtrEff = u64EffAddr;
9538 else
9539 {
9540 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9541 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9542 }
9543 }
9544
9545 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9546 return VINF_SUCCESS;
9547}
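
/*
 * Worked example (informational only): with a 16-bit effective address and
 * bRm=0x46 (mod=01, reg=000, rm=110) followed by a disp8 of 0x10, the code
 * above yields u16EffAddr = BP + 0x10 and SET_SS_DEF() makes SS the default
 * segment, i.e. the classic [bp+disp8] encoding.  With bRm=0x06 (mod=00,
 * rm=110) no base or index register is used and the full disp16 is taken
 * instead.
 */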
9548
9549/** @} */
9550
9551
9552
9553/*
9554 * Include the instructions
9555 */
9556#include "IEMAllInstructions.cpp.h"
9557
9558
9559
9560
9561#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9562
9563/**
9564 * Sets up execution verification mode.
9565 */
9566static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9567{
9568 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9569 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9570
9571 /*
9572 * Always note down the address of the current instruction.
9573 */
9574 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9575 pIemCpu->uOldRip = pOrgCtx->rip;
9576
9577 /*
9578 * Enable verification and/or logging.
9579 */
9580 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9581 if ( fNewNoRem
9582 && ( 0
9583#if 0 /* auto enable on first paged protected mode interrupt */
9584 || ( pOrgCtx->eflags.Bits.u1IF
9585 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9586 && TRPMHasTrap(pVCpu)
9587 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9588#endif
9589#if 0
9590 || ( pOrgCtx->cs.Sel == 0x10
9591 && ( pOrgCtx->rip == 0x90119e3e
9592 || pOrgCtx->rip == 0x901d9810))
9593#endif
9594#if 0 /* Auto enable DSL - FPU stuff. */
9595 || ( pOrgCtx->cs.Sel == 0x10
9596 && (// pOrgCtx->rip == 0xc02ec07f
9597 //|| pOrgCtx->rip == 0xc02ec082
9598 //|| pOrgCtx->rip == 0xc02ec0c9
9599 0
9600 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9601#endif
9602#if 0 /* Auto enable DSL - fstp st0 stuff. */
9603 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9604#endif
9605#if 0
9606 || pOrgCtx->rip == 0x9022bb3a
9607#endif
9608#if 0
9609 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9610#endif
9611#if 0
9612 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9613 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9614#endif
9615#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9616 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9617 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9618 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9619#endif
9620#if 0 /* NT4SP1 - xadd early boot. */
9621 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9622#endif
9623#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9624 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9625#endif
9626#if 0 /* NT4SP1 - cmpxchg (AMD). */
9627 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9628#endif
9629#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9630 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9631#endif
9632#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9633 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9634
9635#endif
9636#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9637 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9638
9639#endif
9640#if 0 /* NT4SP1 - frstor [ecx] */
9641 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9642#endif
9643#if 0 /* xxxxxx - All long mode code. */
9644 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9645#endif
9646#if 0 /* rep movsq linux 3.7 64-bit boot. */
9647 || (pOrgCtx->rip == 0x0000000000100241)
9648#endif
9649#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9650 || (pOrgCtx->rip == 0x000000000215e240)
9651#endif
9652#if 0 /* DOS's size-overridden iret to v8086. */
9653 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9654#endif
9655 )
9656 )
9657 {
9658 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9659 RTLogFlags(NULL, "enabled");
9660 fNewNoRem = false;
9661 }
9662 if (fNewNoRem != pIemCpu->fNoRem)
9663 {
9664 pIemCpu->fNoRem = fNewNoRem;
9665 if (!fNewNoRem)
9666 {
9667 LogAlways(("Enabling verification mode!\n"));
9668 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9669 }
9670 else
9671 LogAlways(("Disabling verification mode!\n"));
9672 }
9673
9674 /*
9675 * Switch state.
9676 */
9677 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9678 {
9679 static CPUMCTX s_DebugCtx; /* Ugly! */
9680
9681 s_DebugCtx = *pOrgCtx;
9682 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9683 }
9684
9685 /*
9686 * See if there is an interrupt pending in TRPM and inject it if we can.
9687 */
9688 pIemCpu->uInjectCpl = UINT8_MAX;
9689 if ( pOrgCtx->eflags.Bits.u1IF
9690 && TRPMHasTrap(pVCpu)
9691 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9692 {
9693 uint8_t u8TrapNo;
9694 TRPMEVENT enmType;
9695 RTGCUINT uErrCode;
9696 RTGCPTR uCr2;
9697 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9698 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9699 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9700 TRPMResetTrap(pVCpu);
9701 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9702 }
9703
9704 /*
9705 * Reset the counters.
9706 */
9707 pIemCpu->cIOReads = 0;
9708 pIemCpu->cIOWrites = 0;
9709 pIemCpu->fIgnoreRaxRdx = false;
9710 pIemCpu->fOverlappingMovs = false;
9711 pIemCpu->fProblematicMemory = false;
9712 pIemCpu->fUndefinedEFlags = 0;
9713
9714 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9715 {
9716 /*
9717 * Free all verification records.
9718 */
9719 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9720 pIemCpu->pIemEvtRecHead = NULL;
9721 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9722 do
9723 {
9724 while (pEvtRec)
9725 {
9726 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9727 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9728 pIemCpu->pFreeEvtRec = pEvtRec;
9729 pEvtRec = pNext;
9730 }
9731 pEvtRec = pIemCpu->pOtherEvtRecHead;
9732 pIemCpu->pOtherEvtRecHead = NULL;
9733 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9734 } while (pEvtRec);
9735 }
9736}
9737
9738
9739/**
9740 * Allocates an event record.
9741 * @returns Pointer to a record.
9742 */
9743static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9744{
9745 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9746 return NULL;
9747
9748 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9749 if (pEvtRec)
9750 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9751 else
9752 {
9753 if (!pIemCpu->ppIemEvtRecNext)
9754 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9755
9756 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9757 if (!pEvtRec)
9758 return NULL;
9759 }
9760 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9761 pEvtRec->pNext = NULL;
9762 return pEvtRec;
9763}
9764
9765
9766/**
9767 * IOMMMIORead notification.
9768 */
9769VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9770{
9771 PVMCPU pVCpu = VMMGetCpu(pVM);
9772 if (!pVCpu)
9773 return;
9774 PIEMCPU pIemCpu = &pVCpu->iem.s;
9775 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9776 if (!pEvtRec)
9777 return;
9778 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9779 pEvtRec->u.RamRead.GCPhys = GCPhys;
9780 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9781 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9782 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9783}
9784
9785
9786/**
9787 * IOMMMIOWrite notification.
9788 */
9789VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9790{
9791 PVMCPU pVCpu = VMMGetCpu(pVM);
9792 if (!pVCpu)
9793 return;
9794 PIEMCPU pIemCpu = &pVCpu->iem.s;
9795 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9796 if (!pEvtRec)
9797 return;
9798 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9799 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9800 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9801 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9802 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9803 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9804 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9805 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9806 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9807}
9808
9809
9810/**
9811 * IOMIOPortRead notification.
9812 */
9813VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9814{
9815 PVMCPU pVCpu = VMMGetCpu(pVM);
9816 if (!pVCpu)
9817 return;
9818 PIEMCPU pIemCpu = &pVCpu->iem.s;
9819 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9820 if (!pEvtRec)
9821 return;
9822 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9823 pEvtRec->u.IOPortRead.Port = Port;
9824 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9825 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9826 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9827}
9828
9829/**
9830 * IOMIOPortWrite notification.
9831 */
9832VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9833{
9834 PVMCPU pVCpu = VMMGetCpu(pVM);
9835 if (!pVCpu)
9836 return;
9837 PIEMCPU pIemCpu = &pVCpu->iem.s;
9838 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9839 if (!pEvtRec)
9840 return;
9841 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9842 pEvtRec->u.IOPortWrite.Port = Port;
9843 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9844 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9845 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9846 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9847}
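
/*
 * Illustrative sketch only (not compiled): how a device access path could
 * feed the verifier with a port write it just performed, assuming a ring-3
 * build with IEM_VERIFICATION_MODE_FULL.  The real call sites live in IOM;
 * the function name, port and value below are made up for the example.
 */
#if 0
static void iemExampleNotifyGuestPortWrite(PVM pVM)
{
    /* Mirror a hypothetical 16-bit write to port 0x60 into the "other"
       event list so iemExecVerificationModeCheck() can match it against
       the corresponding IEM record. */
    IEMNotifyIOPortWrite(pVM, 0x60 /*Port*/, 0x12ab /*u32Value*/, 2 /*cbValue*/);
}
#endif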
9848
9849
9850VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
9851{
9852 AssertFailed();
9853}
9854
9855
9856VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
9857{
9858 AssertFailed();
9859}
9860
9861
9862/**
9863 * Fakes and records an I/O port read.
9864 *
9865 * @returns VINF_SUCCESS.
9866 * @param pIemCpu The IEM per CPU data.
9867 * @param Port The I/O port.
9868 * @param pu32Value Where to store the fake value.
9869 * @param cbValue The size of the access.
9870 */
9871static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
9872{
9873 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9874 if (pEvtRec)
9875 {
9876 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9877 pEvtRec->u.IOPortRead.Port = Port;
9878 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9879 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
9880 *pIemCpu->ppIemEvtRecNext = pEvtRec;
9881 }
9882 pIemCpu->cIOReads++;
9883 *pu32Value = 0xcccccccc;
9884 return VINF_SUCCESS;
9885}
9886
9887
9888/**
9889 * Fakes and records an I/O port write.
9890 *
9891 * @returns VINF_SUCCESS.
9892 * @param pIemCpu The IEM per CPU data.
9893 * @param Port The I/O port.
9894 * @param u32Value The value being written.
9895 * @param cbValue The size of the access.
9896 */
9897static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9898{
9899 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9900 if (pEvtRec)
9901 {
9902 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9903 pEvtRec->u.IOPortWrite.Port = Port;
9904 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9905 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9906 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
9907 *pIemCpu->ppIemEvtRecNext = pEvtRec;
9908 }
9909 pIemCpu->cIOWrites++;
9910 return VINF_SUCCESS;
9911}
9912
9913
9914/**
9915 * Used to add extra details about a stub case.
9916 * @param pIemCpu The IEM per CPU state.
9917 */
9918static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
9919{
9920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9921 PVM pVM = IEMCPU_TO_VM(pIemCpu);
9922 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9923 char szRegs[4096];
9924 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
9925 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
9926 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
9927 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
9928 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
9929 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
9930 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
9931 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
9932 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
9933 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
9934 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
9935 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
9936 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
9937 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
9938 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
9939 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
9940 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
9941 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
9942 " efer=%016VR{efer}\n"
9943 " pat=%016VR{pat}\n"
9944 " sf_mask=%016VR{sf_mask}\n"
9945 "krnl_gs_base=%016VR{krnl_gs_base}\n"
9946 " lstar=%016VR{lstar}\n"
9947 " star=%016VR{star} cstar=%016VR{cstar}\n"
9948 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
9949 );
9950
9951 char szInstr1[256];
9952 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
9953 DBGF_DISAS_FLAGS_DEFAULT_MODE,
9954 szInstr1, sizeof(szInstr1), NULL);
9955 char szInstr2[256];
9956 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
9957 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9958 szInstr2, sizeof(szInstr2), NULL);
9959
9960 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
9961}
9962
9963
9964/**
9965 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
9966 * dump to the assertion info.
9967 *
9968 * @param pEvtRec The record to dump.
9969 */
9970static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
9971{
9972 switch (pEvtRec->enmEvent)
9973 {
9974 case IEMVERIFYEVENT_IOPORT_READ:
9975 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
9976 pEvtRec->u.IOPortRead.Port,
9977 pEvtRec->u.IOPortRead.cbValue);
9978 break;
9979 case IEMVERIFYEVENT_IOPORT_WRITE:
9980 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
9981 pEvtRec->u.IOPortWrite.Port,
9982 pEvtRec->u.IOPortWrite.cbValue,
9983 pEvtRec->u.IOPortWrite.u32Value);
9984 break;
9985 case IEMVERIFYEVENT_RAM_READ:
9986 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
9987 pEvtRec->u.RamRead.GCPhys,
9988 pEvtRec->u.RamRead.cb);
9989 break;
9990 case IEMVERIFYEVENT_RAM_WRITE:
9991 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
9992 pEvtRec->u.RamWrite.GCPhys,
9993 pEvtRec->u.RamWrite.cb,
9994 (int)pEvtRec->u.RamWrite.cb,
9995 pEvtRec->u.RamWrite.ab);
9996 break;
9997 default:
9998 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
9999 break;
10000 }
10001}
10002
10003
10004/**
10005 * Raises an assertion on the specified records, showing the given message with
10006 * record dumps attached.
10007 *
10008 * @param pIemCpu The IEM per CPU data.
10009 * @param pEvtRec1 The first record.
10010 * @param pEvtRec2 The second record.
10011 * @param pszMsg The message explaining why we're asserting.
10012 */
10013static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10014{
10015 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10016 iemVerifyAssertAddRecordDump(pEvtRec1);
10017 iemVerifyAssertAddRecordDump(pEvtRec2);
10018 iemVerifyAssertMsg2(pIemCpu);
10019 RTAssertPanic();
10020}
10021
10022
10023/**
10024 * Raises an assertion on the specified record, showing the given message with
10025 * a record dump attached.
10026 *
10027 * @param pIemCpu The IEM per CPU data.
10028 * @param pEvtRec The record.
10029 * @param pszMsg The message explaining why we're asserting.
10030 */
10031static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10032{
10033 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10034 iemVerifyAssertAddRecordDump(pEvtRec);
10035 iemVerifyAssertMsg2(pIemCpu);
10036 RTAssertPanic();
10037}
10038
10039
10040/**
10041 * Verifies a write record.
10042 *
10043 * @param pIemCpu The IEM per CPU data.
10044 * @param pEvtRec The write record.
10045 * @param fRem Set if REM was doing the other execution; if clear
10046 * it was HM.
10047 */
10048static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10049{
10050 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10051 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10052 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10053 if ( RT_FAILURE(rc)
10054 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10055 {
10056 /* fend off ins */
10057 if ( !pIemCpu->cIOReads
10058 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10059 || ( pEvtRec->u.RamWrite.cb != 1
10060 && pEvtRec->u.RamWrite.cb != 2
10061 && pEvtRec->u.RamWrite.cb != 4) )
10062 {
10063 /* fend off ROMs and MMIO */
10064 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10065 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10066 {
10067 /* fend off fxsave */
10068 if (pEvtRec->u.RamWrite.cb != 512)
10069 {
10070 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10071 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10072 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10073 RTAssertMsg2Add("%s: %.*Rhxs\n"
10074 "iem: %.*Rhxs\n",
10075 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10076 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10077 iemVerifyAssertAddRecordDump(pEvtRec);
10078 iemVerifyAssertMsg2(pIemCpu);
10079 RTAssertPanic();
10080 }
10081 }
10082 }
10083 }
10084
10085}
10086
10087/**
10088 * Performs the post-execution verification checks.
10089 */
10090static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10091{
10092 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10093 return;
10094
10095 /*
10096 * Switch back the state.
10097 */
10098 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10099 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10100 Assert(pOrgCtx != pDebugCtx);
10101 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10102
10103 /*
10104 * Execute the instruction in REM.
10105 */
10106 bool fRem = false;
10107 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10108 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10109 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10110#ifdef IEM_VERIFICATION_MODE_FULL_HM
10111 if ( HMIsEnabled(pVM)
10112 && pIemCpu->cIOReads == 0
10113 && pIemCpu->cIOWrites == 0
10114 && !pIemCpu->fProblematicMemory)
10115 {
10116 unsigned iLoops = 0;
10117 do
10118 {
10119 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10120 iLoops++;
10121 } while ( rc == VINF_SUCCESS
10122 || ( rc == VINF_EM_DBG_STEPPED
10123 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10124 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10125 || ( pOrgCtx->rip != pDebugCtx->rip
10126 && pIemCpu->uInjectCpl != UINT8_MAX
10127 && iLoops < 8) );
10128 }
10129#endif
10130 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10131 || rc == VINF_IOM_R3_IOPORT_READ
10132 || rc == VINF_IOM_R3_IOPORT_WRITE
10133 || rc == VINF_IOM_R3_MMIO_READ
10134 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10135 || rc == VINF_IOM_R3_MMIO_WRITE
10136 )
10137 {
10138 EMRemLock(pVM);
10139 rc = REMR3EmulateInstruction(pVM, pVCpu);
10140 AssertRC(rc);
10141 EMRemUnlock(pVM);
10142 fRem = true;
10143 }
10144
10145 /*
10146 * Compare the register states.
10147 */
10148 unsigned cDiffs = 0;
10149 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10150 {
10151 //Log(("REM and IEM end up with different registers!\n"));
10152 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10153
10154# define CHECK_FIELD(a_Field) \
10155 do \
10156 { \
10157 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10158 { \
10159 switch (sizeof(pOrgCtx->a_Field)) \
10160 { \
10161 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10162 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10163 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10164 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10165 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10166 } \
10167 cDiffs++; \
10168 } \
10169 } while (0)
10170
10171# define CHECK_BIT_FIELD(a_Field) \
10172 do \
10173 { \
10174 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10175 { \
10176 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10177 cDiffs++; \
10178 } \
10179 } while (0)
10180
10181# define CHECK_SEL(a_Sel) \
10182 do \
10183 { \
10184 CHECK_FIELD(a_Sel.Sel); \
10185 CHECK_FIELD(a_Sel.Attr.u); \
10186 CHECK_FIELD(a_Sel.u64Base); \
10187 CHECK_FIELD(a_Sel.u32Limit); \
10188 CHECK_FIELD(a_Sel.fFlags); \
10189 } while (0)
10190
10191#if 1 /* The recompiler doesn't update these the intel way. */
10192 if (fRem)
10193 {
10194 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
10195 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
10196 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
10197 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
10198 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
10199 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
10200 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
10201 //pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK;
10202 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
10203 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
10204 }
10205#endif
10206 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
10207 {
10208 RTAssertMsg2Weak(" the FPU state differs\n");
10209 cDiffs++;
10210 CHECK_FIELD(fpu.FCW);
10211 CHECK_FIELD(fpu.FSW);
10212 CHECK_FIELD(fpu.FTW);
10213 CHECK_FIELD(fpu.FOP);
10214 CHECK_FIELD(fpu.FPUIP);
10215 CHECK_FIELD(fpu.CS);
10216 CHECK_FIELD(fpu.Rsrvd1);
10217 CHECK_FIELD(fpu.FPUDP);
10218 CHECK_FIELD(fpu.DS);
10219 CHECK_FIELD(fpu.Rsrvd2);
10220 CHECK_FIELD(fpu.MXCSR);
10221 CHECK_FIELD(fpu.MXCSR_MASK);
10222 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
10223 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
10224 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
10225 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
10226 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
10227 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
10228 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
10229 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
10230 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
10231 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
10232 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
10233 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
10234 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
10235 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
10236 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
10237 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
10238 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
10239 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
10240 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
10241 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
10242 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
10243 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
10244 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
10245 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
10246 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
10247 CHECK_FIELD(fpu.au32RsrvdRest[i]);
10248 }
10249 CHECK_FIELD(rip);
10250 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10251 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10252 {
10253 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10254 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10255 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10256 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10257 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10258 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10259 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10260 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10261 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10262 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10263 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10264 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10265 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10266 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10267 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10268 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10269 if (0 && !fRem) /** @todo debug the occasional cleared RF flag when running against VT-x. */
10270 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10271 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10272 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10273 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10274 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10275 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10276 }
10277
10278 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10279 CHECK_FIELD(rax);
10280 CHECK_FIELD(rcx);
10281 if (!pIemCpu->fIgnoreRaxRdx)
10282 CHECK_FIELD(rdx);
10283 CHECK_FIELD(rbx);
10284 CHECK_FIELD(rsp);
10285 CHECK_FIELD(rbp);
10286 CHECK_FIELD(rsi);
10287 CHECK_FIELD(rdi);
10288 CHECK_FIELD(r8);
10289 CHECK_FIELD(r9);
10290 CHECK_FIELD(r10);
10291 CHECK_FIELD(r11);
10292 CHECK_FIELD(r12);
10293 CHECK_FIELD(r13);
10294 CHECK_SEL(cs);
10295 CHECK_SEL(ss);
10296 CHECK_SEL(ds);
10297 CHECK_SEL(es);
10298 CHECK_SEL(fs);
10299 CHECK_SEL(gs);
10300 CHECK_FIELD(cr0);
10301
10302 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10303 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10304 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
10305 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10306 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10307 {
10308 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10309 { /* ignore */ }
10310 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10311 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10312 && fRem)
10313 { /* ignore */ }
10314 else
10315 CHECK_FIELD(cr2);
10316 }
10317 CHECK_FIELD(cr3);
10318 CHECK_FIELD(cr4);
10319 CHECK_FIELD(dr[0]);
10320 CHECK_FIELD(dr[1]);
10321 CHECK_FIELD(dr[2]);
10322 CHECK_FIELD(dr[3]);
10323 CHECK_FIELD(dr[6]);
10324 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10325 CHECK_FIELD(dr[7]);
10326 CHECK_FIELD(gdtr.cbGdt);
10327 CHECK_FIELD(gdtr.pGdt);
10328 CHECK_FIELD(idtr.cbIdt);
10329 CHECK_FIELD(idtr.pIdt);
10330 CHECK_SEL(ldtr);
10331 CHECK_SEL(tr);
10332 CHECK_FIELD(SysEnter.cs);
10333 CHECK_FIELD(SysEnter.eip);
10334 CHECK_FIELD(SysEnter.esp);
10335 CHECK_FIELD(msrEFER);
10336 CHECK_FIELD(msrSTAR);
10337 CHECK_FIELD(msrPAT);
10338 CHECK_FIELD(msrLSTAR);
10339 CHECK_FIELD(msrCSTAR);
10340 CHECK_FIELD(msrSFMASK);
10341 CHECK_FIELD(msrKERNELGSBASE);
10342
10343 if (cDiffs != 0)
10344 {
10345 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10346 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10347 iemVerifyAssertMsg2(pIemCpu);
10348 RTAssertPanic();
10349 }
10350# undef CHECK_FIELD
10351# undef CHECK_BIT_FIELD
10352 }
10353
10354 /*
10355 * If the register state compared fine, check the verification event
10356 * records.
10357 */
10358 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10359 {
10360 /*
10361 * Compare verification event records.
10362 * - I/O port accesses should be a 1:1 match.
10363 */
10364 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10365 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10366 while (pIemRec && pOtherRec)
10367 {
10368 /* Since we might miss RAM writes and reads, ignore extra IEM reads and
10369 verify that extra IEM writes match what is actually in guest memory. */
10370 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10371 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10372 && pIemRec->pNext)
10373 {
10374 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10375 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10376 pIemRec = pIemRec->pNext;
10377 }
10378
10379 /* Do the compare. */
10380 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10381 {
10382 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10383 break;
10384 }
10385 bool fEquals;
10386 switch (pIemRec->enmEvent)
10387 {
10388 case IEMVERIFYEVENT_IOPORT_READ:
10389 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10390 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10391 break;
10392 case IEMVERIFYEVENT_IOPORT_WRITE:
10393 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10394 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10395 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10396 break;
10397 case IEMVERIFYEVENT_RAM_READ:
10398 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10399 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10400 break;
10401 case IEMVERIFYEVENT_RAM_WRITE:
10402 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10403 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10404 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10405 break;
10406 default:
10407 fEquals = false;
10408 break;
10409 }
10410 if (!fEquals)
10411 {
10412 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10413 break;
10414 }
10415
10416 /* advance */
10417 pIemRec = pIemRec->pNext;
10418 pOtherRec = pOtherRec->pNext;
10419 }
10420
10421 /* Ignore extra writes and reads. */
10422 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10423 {
10424 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10425 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10426 pIemRec = pIemRec->pNext;
10427 }
10428 if (pIemRec != NULL)
10429 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10430 else if (pOtherRec != NULL)
10431 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10432 }
10433 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10434}
10435
10436#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10437
10438/* stubs */
10439static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10440{
10441 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10442 return VERR_INTERNAL_ERROR;
10443}
10444
10445static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10446{
10447 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10448 return VERR_INTERNAL_ERROR;
10449}
10450
10451#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10452
10453
10454#ifdef LOG_ENABLED
10455/**
10456 * Logs the current instruction.
10457 * @param pVCpu The cross context virtual CPU structure of the caller.
10458 * @param pCtx The current CPU context.
10459 * @param fSameCtx Set if we have the same context information as the VMM,
10460 * clear if we may have already executed an instruction in
10461 * our debug context. When clear, we assume IEMCPU holds
10462 * valid CPU mode info.
10463 */
10464static void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10465{
10466# ifdef IN_RING3
10467 if (LogIs2Enabled())
10468 {
10469 char szInstr[256];
10470 uint32_t cbInstr = 0;
10471 if (fSameCtx)
10472 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10473 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10474 szInstr, sizeof(szInstr), &cbInstr);
10475 else
10476 {
10477 uint32_t fFlags = 0;
10478 switch (pVCpu->iem.s.enmCpuMode)
10479 {
10480 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10481 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10482 case IEMMODE_16BIT:
10483 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10484 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10485 else
10486 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10487 break;
10488 }
10489 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10490 szInstr, sizeof(szInstr), &cbInstr);
10491 }
10492
10493 Log2(("****\n"
10494 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10495 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10496 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10497 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10498 " %s\n"
10499 ,
10500 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10501 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10502 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10503 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10504 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
10505 szInstr));
10506
10507 if (LogIs3Enabled())
10508 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10509 }
10510 else
10511# endif
10512 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10513 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10514}
10515#endif
10516
10517
10518/**
10519 * Makes status code adjustments (pass up from I/O and access handlers)
10520 * as well as maintaining statistics.
10521 *
10522 * @returns Strict VBox status code to pass up.
10523 * @param pIemCpu The IEM per CPU data.
10524 * @param rcStrict The status from executing an instruction.
10525 */
10526DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10527{
10528 if (rcStrict != VINF_SUCCESS)
10529 {
10530 if (RT_SUCCESS(rcStrict))
10531 {
10532 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10533 || rcStrict == VINF_IOM_R3_IOPORT_READ
10534 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10535 || rcStrict == VINF_IOM_R3_MMIO_READ
10536 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10537 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10538 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10539 int32_t const rcPassUp = pIemCpu->rcPassUp;
10540 if (rcPassUp == VINF_SUCCESS)
10541 pIemCpu->cRetInfStatuses++;
10542 else if ( rcPassUp < VINF_EM_FIRST
10543 || rcPassUp > VINF_EM_LAST
10544 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10545 {
10546 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10547 pIemCpu->cRetPassUpStatus++;
10548 rcStrict = rcPassUp;
10549 }
10550 else
10551 {
10552 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10553 pIemCpu->cRetInfStatuses++;
10554 }
10555 }
10556 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10557 pIemCpu->cRetAspectNotImplemented++;
10558 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10559 pIemCpu->cRetInstrNotImplemented++;
10560#ifdef IEM_VERIFICATION_MODE_FULL
10561 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10562 rcStrict = VINF_SUCCESS;
10563#endif
10564 else
10565 pIemCpu->cRetErrStatuses++;
10566 }
10567 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10568 {
10569 pIemCpu->cRetPassUpStatus++;
10570 rcStrict = pIemCpu->rcPassUp;
10571 }
10572
10573 return rcStrict;
10574}
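
/*
 * Worked example (informational only): if an instruction returns an
 * informational status such as VINF_IOM_R3_MMIO_WRITE while pIemCpu->rcPassUp
 * holds a non-VINF_SUCCESS status that either lies outside the
 * VINF_EM_FIRST..VINF_EM_LAST window or ranks numerically below the
 * instruction status, the pass-up status replaces it and cRetPassUpStatus is
 * bumped; otherwise the instruction status is kept and counted under
 * cRetInfStatuses.
 */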
10575
10576
10577/**
10578 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10579 * IEMExecOneWithPrefetchedByPC.
10580 *
10581 * @return Strict VBox status code.
10582 * @param pVCpu The current virtual CPU.
10583 * @param pIemCpu The IEM per CPU data.
10584 * @param fExecuteInhibit If set, execute the instruction following CLI,
10585 * POP SS and MOV SS,GR.
10586 */
10587DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10588{
10589 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10590 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10591 if (rcStrict == VINF_SUCCESS)
10592 pIemCpu->cInstructions++;
10593 if (pIemCpu->cActiveMappings > 0)
10594 iemMemRollback(pIemCpu);
10595//#ifdef DEBUG
10596// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10597//#endif
10598
10599 /* Execute the next instruction as well if a cli, pop ss or
10600 mov ss, Gr has just completed successfully. */
10601 if ( fExecuteInhibit
10602 && rcStrict == VINF_SUCCESS
10603 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10604 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10605 {
10606 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10607 if (rcStrict == VINF_SUCCESS)
10608 {
10609# ifdef LOG_ENABLED
10610 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10611# endif
10612 IEM_OPCODE_GET_NEXT_U8(&b);
10613 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10614 if (rcStrict == VINF_SUCCESS)
10615 pIemCpu->cInstructions++;
10616 if (pIemCpu->cActiveMappings > 0)
10617 iemMemRollback(pIemCpu);
10618 }
10619 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10620 }
10621
10622 /*
10623 * Return value fiddling, statistics and sanity assertions.
10624 */
10625 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10626
10627 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10628 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10629#if defined(IEM_VERIFICATION_MODE_FULL)
10630 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10631 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10632 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10633 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10634#endif
10635 return rcStrict;
10636}
10637
10638
10639#ifdef IN_RC
10640/**
10641 * Re-enters raw-mode or ensure we return to ring-3.
10642 *
10643 * @returns rcStrict, maybe modified.
10644 * @param pIemCpu The IEM CPU structure.
10645 * @param pVCpu The cross context virtual CPU structure of the caller.
10646 * @param pCtx The current CPU context.
10647 * @param rcStrict The status code returned by the interpreter.
10648 */
10649DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10650{
10651 if (!pIemCpu->fInPatchCode)
10652 CPUMRawEnter(pVCpu, CPUMCTX2CORE(pCtx));
10653 return rcStrict;
10654}
10655#endif
10656
10657
10658/**
10659 * Execute one instruction.
10660 *
10661 * @return Strict VBox status code.
10662 * @param pVCpu The current virtual CPU.
10663 */
10664VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10665{
10666 PIEMCPU pIemCpu = &pVCpu->iem.s;
10667
10668#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10669 iemExecVerificationModeSetup(pIemCpu);
10670#endif
10671#ifdef LOG_ENABLED
10672 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10673 iemLogCurInstr(pVCpu, pCtx, true);
10674#endif
10675
10676 /*
10677 * Do the decoding and emulation.
10678 */
10679 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10680 if (rcStrict == VINF_SUCCESS)
10681 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10682
10683#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10684 /*
10685 * Assert some sanity.
10686 */
10687 iemExecVerificationModeCheck(pIemCpu);
10688#endif
10689#ifdef IN_RC
10690 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10691#endif
10692 if (rcStrict != VINF_SUCCESS)
10693 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10694 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10695 return rcStrict;
10696}
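
/*
 * Illustrative sketch only (not compiled): the kind of loop a caller such as
 * EM could use to push a handful of instructions through IEM, stopping on the
 * first status that needs other handling.  The function name and iteration
 * count are made up for the example.
 */
#if 0
static VBOXSTRICTRC iemExampleRunAFew(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (unsigned i = 0; i < 16 && rcStrict == VINF_SUCCESS; i++)
        rcStrict = IEMExecOne(pVCpu);
    return rcStrict;
}
#endif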
10697
10698
10699VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10700{
10701 PIEMCPU pIemCpu = &pVCpu->iem.s;
10702 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10703 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10704
10705 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10706 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10707 if (rcStrict == VINF_SUCCESS)
10708 {
10709 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10710 if (pcbWritten)
10711 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10712 }
10713
10714#ifdef IN_RC
10715 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10716#endif
10717 return rcStrict;
10718}
10719
10720
10721VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10722 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10723{
10724 PIEMCPU pIemCpu = &pVCpu->iem.s;
10725 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10726 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10727
10728 VBOXSTRICTRC rcStrict;
10729 if ( cbOpcodeBytes
10730 && pCtx->rip == OpcodeBytesPC)
10731 {
10732 iemInitDecoder(pIemCpu, false);
10733 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10734 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10735 rcStrict = VINF_SUCCESS;
10736 }
10737 else
10738 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10739 if (rcStrict == VINF_SUCCESS)
10740 {
10741 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10742 }
10743
10744#ifdef IN_RC
10745 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10746#endif
10747 return rcStrict;
10748}
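
/*
 * Illustrative sketch only (not compiled): a caller that already holds the
 * instruction bytes at the current RIP (for instance from a prior code read)
 * can hand them straight to the decoder instead of having IEM re-fetch them
 * from guest memory.  The function name and the 'mov eax, ebx' encoding below
 * are made up for the example.
 */
#if 0
static VBOXSTRICTRC iemExampleExecPrefetched(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t GCPtrPC)
{
    static const uint8_t s_abInstr[] = { 0x89, 0xd8 }; /* mov eax, ebx */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, GCPtrPC, s_abInstr, sizeof(s_abInstr));
}
#endif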
10749
10750
10751VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10752{
10753 PIEMCPU pIemCpu = &pVCpu->iem.s;
10754 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10755 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10756
10757 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10758 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10759 if (rcStrict == VINF_SUCCESS)
10760 {
10761 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10762 if (pcbWritten)
10763 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10764 }
10765
10766#ifdef IN_RC
10767 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10768#endif
10769 return rcStrict;
10770}
10771
10772
10773VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10774 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10775{
10776 PIEMCPU pIemCpu = &pVCpu->iem.s;
10777 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10778 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10779
10780 VBOXSTRICTRC rcStrict;
10781 if ( cbOpcodeBytes
10782 && pCtx->rip == OpcodeBytesPC)
10783 {
10784 iemInitDecoder(pIemCpu, true);
10785 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10786 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10787 rcStrict = VINF_SUCCESS;
10788 }
10789 else
10790 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10791 if (rcStrict == VINF_SUCCESS)
10792 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10793
10794#ifdef IN_RC
10795 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10796#endif
10797 return rcStrict;
10798}
10799
10800
10801VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
10802{
10803 PIEMCPU pIemCpu = &pVCpu->iem.s;
10804
10805 /*
10806 * See if there is an interrupt pending in TRPM and inject it if we can.
10807 */
10808#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
10809 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10810# ifdef IEM_VERIFICATION_MODE_FULL
10811 pIemCpu->uInjectCpl = UINT8_MAX;
10812# endif
10813 if ( pCtx->eflags.Bits.u1IF
10814 && TRPMHasTrap(pVCpu)
10815 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
10816 {
10817 uint8_t u8TrapNo;
10818 TRPMEVENT enmType;
10819 RTGCUINT uErrCode;
10820 RTGCPTR uCr2;
10821 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
10822 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10823 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10824 TRPMResetTrap(pVCpu);
10825 }
10826#else
10827 iemExecVerificationModeSetup(pIemCpu);
10828 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10829#endif
10830
10831 /*
10832 * Log the state.
10833 */
10834#ifdef LOG_ENABLED
10835 iemLogCurInstr(pVCpu, pCtx, true);
10836#endif
10837
10838 /*
10839 * Do the decoding and emulation.
10840 */
10841 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10842 if (rcStrict == VINF_SUCCESS)
10843 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10844
10845#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10846 /*
10847 * Assert some sanity.
10848 */
10849 iemExecVerificationModeCheck(pIemCpu);
10850#endif
10851
10852 /*
10853 * Maybe re-enter raw-mode and log.
10854 */
10855#ifdef IN_RC
10856 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10857#endif
10858 if (rcStrict != VINF_SUCCESS)
10859 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10860 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10861 return rcStrict;
10862}
10863
10864
10865
10866/**
10867 * Injects a trap, fault, abort, software interrupt or external interrupt.
10868 *
10869 * The parameter list matches TRPMQueryTrapAll pretty closely.
10870 *
10871 * @returns Strict VBox status code.
10872 * @param pVCpu The current virtual CPU.
10873 * @param u8TrapNo The trap number.
10874 * @param enmType What type is it (trap/fault/abort), software
10875 * interrupt or hardware interrupt.
10876 * @param uErrCode The error code if applicable.
10877 * @param uCr2 The CR2 value if applicable.
10878 * @param cbInstr The instruction length (only relevant for
10879 * software interrupts).
10880 */
10881VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10882 uint8_t cbInstr)
10883{
10884 iemInitDecoder(&pVCpu->iem.s, false);
10885#ifdef DBGFTRACE_ENABLED
10886 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10887 u8TrapNo, enmType, uErrCode, uCr2);
10888#endif
10889
10890 uint32_t fFlags;
10891 switch (enmType)
10892 {
10893 case TRPM_HARDWARE_INT:
10894 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10895 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10896 uErrCode = uCr2 = 0;
10897 break;
10898
10899 case TRPM_SOFTWARE_INT:
10900 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10901 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10902 uErrCode = uCr2 = 0;
10903 break;
10904
10905 case TRPM_TRAP:
10906 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10907 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10908 if (u8TrapNo == X86_XCPT_PF)
10909 fFlags |= IEM_XCPT_FLAGS_CR2;
10910 switch (u8TrapNo)
10911 {
10912 case X86_XCPT_DF:
10913 case X86_XCPT_TS:
10914 case X86_XCPT_NP:
10915 case X86_XCPT_SS:
10916 case X86_XCPT_PF:
10917 case X86_XCPT_AC:
10918 fFlags |= IEM_XCPT_FLAGS_ERR;
10919 break;
10920
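            /* Delivering an NMI blocks further NMIs until the next IRET. */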
10921 case X86_XCPT_NMI:
10922 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
10923 break;
10924 }
10925 break;
10926
10927 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10928 }
10929
10930 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10931}
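

/*
 * Usage sketch (not part of the original file): injecting a guest page fault (#PF)
 * directly through IEMInjectTrap.  The helper name and parameters below are
 * illustrative assumptions; the constants and the call signature match the API above.
 */
#if 0
static VBOXSTRICTRC iemExampleInjectGuestPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    /* cbInstr is only relevant for software interrupts, so 0 is fine for a #PF. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /* cbInstr */);
}
#endif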
10932
10933
10934/**
10935 * Injects the active TRPM event.
10936 *
10937 * @returns Strict VBox status code.
10938 * @param pVCpu Pointer to the VMCPU.
10939 */
10940VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
10941{
10942#ifndef IEM_IMPLEMENTS_TASKSWITCH
10943 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10944#else
10945 uint8_t u8TrapNo;
10946 TRPMEVENT enmType;
10947 RTGCUINT uErrCode;
10948 RTGCUINTPTR uCr2;
10949 uint8_t cbInstr;
10950 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
10951 if (RT_FAILURE(rc))
10952 return rc;
10953
10954 TRPMResetTrap(pVCpu);
10955 return IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10956#endif
10957}
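

/*
 * Usage sketch (not part of the original file): a caller-side pattern for
 * IEMInjectTrpmEvent.  The helper name and control flow are illustrative
 * assumptions; TRPMHasTrap is the same query used earlier in this file.
 */
#if 0
static VBOXSTRICTRC iemExampleDeliverPendingEvent(PVMCPU pVCpu)
{
    if (!TRPMHasTrap(pVCpu))
        return VINF_SUCCESS;            /* Nothing pending. */
    return IEMInjectTrpmEvent(pVCpu);   /* Queries TRPM, resets the event and injects it via IEM. */
}
#endif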
10958
10959
10960VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10961{
10962 return VERR_NOT_IMPLEMENTED;
10963}
10964
10965
10966VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10967{
10968 return VERR_NOT_IMPLEMENTED;
10969}
10970
10971
10972#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10973/**
10974 * Executes an IRET instruction with default operand size.
10975 *
10976 * This is for PATM.
10977 *
10978 * @returns VBox status code.
10979 * @param pVCpu The current virtual CPU.
10980 * @param pCtxCore The register frame.
10981 */
10982VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
10983{
10984 PIEMCPU pIemCpu = &pVCpu->iem.s;
10985 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10986
10987 iemCtxCoreToCtx(pCtx, pCtxCore);
10988 iemInitDecoder(pIemCpu, false);
10989 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
10990 if (rcStrict == VINF_SUCCESS)
10991 iemCtxToCtxCore(pCtxCore, pCtx);
10992 else
10993 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10994 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10995 return rcStrict;
10996}
10997#endif
10998
10999
11000
11001/**
11002 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11003 *
11004 * This API ASSUMES that the caller has already verified that the guest code is
11005 * allowed to access the I/O port. (The I/O port is in the DX register in the
11006 * guest state.)
11007 *
11008 * @returns Strict VBox status code.
11009 * @param pVCpu The cross context per virtual CPU structure.
11010 * @param cbValue The size of the I/O port access (1, 2, or 4).
11011 * @param enmAddrMode The addressing mode.
11012 * @param fRepPrefix Indicates whether a repeat prefix is used
11013 * (doesn't matter which for this instruction).
11014 * @param cbInstr The instruction length in bytes.
11015 * @param   iEffSeg             The effective segment register (X86_SREG_XXX index).
11016 */
11017VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11018 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11019{
11020 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11021 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
11022
11023 /*
11024 * State init.
11025 */
11026 PIEMCPU pIemCpu = &pVCpu->iem.s;
11027 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11028
11029 /*
11030 * Switch orgy for getting to the right handler.
11031 */
11032 VBOXSTRICTRC rcStrict;
11033 if (fRepPrefix)
11034 {
11035 switch (enmAddrMode)
11036 {
11037 case IEMMODE_16BIT:
11038 switch (cbValue)
11039 {
11040 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11041 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11042 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11043 default:
11044 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11045 }
11046 break;
11047
11048 case IEMMODE_32BIT:
11049 switch (cbValue)
11050 {
11051 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11052 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11053 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11054 default:
11055 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11056 }
11057 break;
11058
11059 case IEMMODE_64BIT:
11060 switch (cbValue)
11061 {
11062 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11063 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11064 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11065 default:
11066 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11067 }
11068 break;
11069
11070 default:
11071 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11072 }
11073 }
11074 else
11075 {
11076 switch (enmAddrMode)
11077 {
11078 case IEMMODE_16BIT:
11079 switch (cbValue)
11080 {
11081 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11082 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11083 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11084 default:
11085 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11086 }
11087 break;
11088
11089 case IEMMODE_32BIT:
11090 switch (cbValue)
11091 {
11092 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11093 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11094 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11095 default:
11096 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11097 }
11098 break;
11099
11100 case IEMMODE_64BIT:
11101 switch (cbValue)
11102 {
11103 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11104 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11105 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11106 default:
11107 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11108 }
11109 break;
11110
11111 default:
11112 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11113 }
11114 }
11115
11116 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11117}
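

/*
 * Usage sketch (not part of the original file): how HM/EM might hand a decoded
 * 'rep outsb' to this API after checking I/O port permissions.  The helper name
 * and the concrete operand/length values are illustrative assumptions.
 */
#if 0
static VBOXSTRICTRC iemExampleRepOutsb(PVMCPU pVCpu)
{
    /* rep outsb: byte sized access, 32-bit addressing, default DS segment,
       2 byte encoding (0xf3 0x6e). */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS /*iEffSeg*/);
}
#endif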
11118
11119
11120/**
11121 * Interface for HM and EM for executing string I/O IN (read) instructions.
11122 *
11123 * This API ASSUMES that the caller has already verified that the guest code is
11124 * allowed to access the I/O port. (The I/O port is in the DX register in the
11125 * guest state.)
11126 *
11127 * @returns Strict VBox status code.
11128 * @param pVCpu The cross context per virtual CPU structure.
11129 * @param cbValue The size of the I/O port access (1, 2, or 4).
11130 * @param enmAddrMode The addressing mode.
11131 * @param fRepPrefix Indicates whether a repeat prefix is used
11132 * (doesn't matter which for this instruction).
11133 * @param cbInstr The instruction length in bytes.
11134 */
11135VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11136 bool fRepPrefix, uint8_t cbInstr)
11137{
11138 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
11139
11140 /*
11141 * State init.
11142 */
11143 PIEMCPU pIemCpu = &pVCpu->iem.s;
11144 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11145
11146 /*
11147 * Switch orgy for getting to the right handler.
11148 */
11149 VBOXSTRICTRC rcStrict;
11150 if (fRepPrefix)
11151 {
11152 switch (enmAddrMode)
11153 {
11154 case IEMMODE_16BIT:
11155 switch (cbValue)
11156 {
11157 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11158 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11159 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11160 default:
11161 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11162 }
11163 break;
11164
11165 case IEMMODE_32BIT:
11166 switch (cbValue)
11167 {
11168 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11169 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11170 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11171 default:
11172 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11173 }
11174 break;
11175
11176 case IEMMODE_64BIT:
11177 switch (cbValue)
11178 {
11179 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11180 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11181 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11182 default:
11183 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11184 }
11185 break;
11186
11187 default:
11188 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11189 }
11190 }
11191 else
11192 {
11193 switch (enmAddrMode)
11194 {
11195 case IEMMODE_16BIT:
11196 switch (cbValue)
11197 {
11198 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11199 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11200 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11201 default:
11202 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11203 }
11204 break;
11205
11206 case IEMMODE_32BIT:
11207 switch (cbValue)
11208 {
11209 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11210 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11211 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11212 default:
11213 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11214 }
11215 break;
11216
11217 case IEMMODE_64BIT:
11218 switch (cbValue)
11219 {
11220 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11221 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11222 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11223 default:
11224 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11225 }
11226 break;
11227
11228 default:
11229 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11230 }
11231 }
11232
11233 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11234}
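

/*
 * Usage sketch (not part of the original file): the read direction mirrors the
 * write case above, minus the segment argument since INS always stores to
 * ES:[e/r]DI.  The helper name and concrete values are illustrative assumptions.
 */
#if 0
static VBOXSTRICTRC iemExampleRepInsw(PVMCPU pVCpu)
{
    /* rep insw: word sized access, 16-bit addressing, 2 byte encoding (0xf3 0x6d). */
    return IEMExecStringIoRead(pVCpu, 2 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/, 2 /*cbInstr*/);
}
#endif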
11235